-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathsetup.sh
More file actions
executable file
·243 lines (226 loc) · 9.66 KB
/
setup.sh
File metadata and controls
executable file
·243 lines (226 loc) · 9.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
#!/usr/bin/env bash
# Creates a new GCP project and provisions benchmark infrastructure.
# You need gcloud command-line tools installed. This script will create
# resources that incur billing charges. Delete the project this script creates
# when you're done with it!
set -o errexit
set -o nounset
# Project name comes from the first CLI arg, or an interactive prompt.
if [ $# -ge 1 ]; then
    PROJECTNAME=$1
else
    rndint=$((RANDOM % 100000))
    echo "What do you want to name your project? e.g., bmarkgcp$rndint"
    read -r PROJECTNAME
fi
# Strip ALL whitespace from the entered name (printf avoids echo -e's
# backslash-escape interpretation of user input).
PROJECTNAME="$(printf '%s' "${PROJECTNAME}" | tr -d '[:space:]')"
if [ -z "$PROJECTNAME" ]; then
    echo "project name is required" >&2
    exit 1
fi
pnamesz=${#PROJECTNAME}
if [ "$pnamesz" -gt 13 ]; then
    # if it is too long, then some of the targeted urls' hostnames will be
    # longer than allowed on GAE (<version>-dot-<service>-dot-<project> must
    # be less than 64 characters)
    echo "project name cannot be longer than 13 characters" >&2
    exit 1
fi
# Tooling prerequisites: python deps for the deploy/benchmark helpers plus
# the gcloud beta component used below.
pip install --upgrade google-auth-oauthlib requests boto3
gcloud components install beta
gcloud components update
# Ask which organization and billing account the new project should use.
gcloud organizations list
echo "What organization ID do you want your new project to belong to?"
read -r ORGID
# printf avoids echo -e mangling backslashes in user input.
ORGID="$(printf '%s' "${ORGID}" | tr -d '[:space:]')"
gcloud beta billing accounts list
echo "What billing account ID do you want your new project to use?"
read -r ACCOUNTID
ACCOUNTID="$(printf '%s' "${ACCOUNTID}" | tr -d '[:space:]')"
set -o xtrace
gcloud auth application-default login
gcloud projects create "$PROJECTNAME" --set-as-default --organization "$ORGID"
gcloud beta billing projects link "$PROJECTNAME" --billing-account="$ACCOUNTID"
gcloud app create --region=us-central
# Dedicated memcache has to be enabled manually in the console.
echo "Please go turn on dedicated memcache for your GAE app ... done?"
read -r ignore
echo "Please be patient; setting up Memorystore (Redis) is quite slow ..."
gcloud services enable redis.googleapis.com
gcloud redis instances create testcluster --size=1 --region=us-central1 \
    --zone=us-central1-a --tier=STANDARD
# Scrape connection details from the 'key: value' lines of describe output.
redishost="$(gcloud redis instances describe testcluster --region=us-central1 \
    | fgrep host | cut -d: -f2 | cut -d' ' -f2)"
redisport="$(gcloud redis instances describe testcluster --region=us-central1 \
    | fgrep port | cut -d: -f2 | cut -d' ' -f2)"
# NOTE(review): redisnet is captured but never used later in this script.
redisnet="$(gcloud redis instances describe testcluster --region=us-central1 \
    | fgrep authorizedNetwork | cut -d: -f2 | cut -d' ' -f2)"
gcloud services enable vpcaccess.googleapis.com
gcloud beta compute networks vpc-access connectors create conntest \
    --network default --region us-central1 --range 10.8.0.0/28
# second gen runtimes need to be connected to the VPC where redis is running
# and told how to connect to it
vpcname="$(gcloud beta compute networks vpc-access connectors describe \
    conntest --region us-central1 \
    | fgrep name | cut -d: -f2 | cut -d' ' -f2)"
generatedYamlFN=./tmp-generated.yaml
# YAML snippet appended to every second-gen runtime's app.yaml template.
{
    echo "vpc_access_connector:"
    echo " name: $vpcname"
    echo "env_variables:"
    echo " REDIS_HOST: \"$redishost\""
    echo " REDIS_PORT: \"$redisport\""
} > "$generatedYamlFN"
secondGenRuntimes=('py37' 'node10' 'node12')
for runtime in "${secondGenRuntimes[@]}"; do
    cat "./platforms/gae_standard/${runtime}/template.yaml" "$generatedYamlFN" \
        > "./platforms/gae_standard/${runtime}/template-generated.yaml"
done
rm "$generatedYamlFN"
# cloud run needs redis connection info too
echo "ENV REDIS_HOST $redishost" > ./platforms/cloud_run/.redis_info
echo "ENV REDIS_PORT $redisport" >> ./platforms/cloud_run/.redis_info
echo "setting up Cloud Tasks ..."
gcloud services enable cloudtasks.googleapis.com
gcloud services enable tasks.googleapis.com
# Service account the task-enqueueing benchmarks authenticate as.
gcloud iam service-accounts create testcloudtasks
gcloud projects add-iam-policy-binding "$PROJECTNAME" \
    --member "serviceAccount:testcloudtasks@$PROJECTNAME.iam.gserviceaccount.com" \
    --role "roles/cloudtasks.admin"
gcloud projects add-iam-policy-binding "$PROJECTNAME" \
    --member "serviceAccount:testcloudtasks@$PROJECTNAME.iam.gserviceaccount.com" \
    --role "roles/appengine.appViewer"
# Key is baked into the py37 app so it can enqueue tasks.
gcloud iam service-accounts keys create \
    platforms/gae_standard/py37/cloudtasksaccount.json \
    --iam-account "testcloudtasks@$PROJECTNAME.iam.gserviceaccount.com"
# routing override is specified because app_engine_routing seems to be broken
# and ignored when used from managed cloud run (works fine from GAE though)
gcloud tasks queues create testpy3 \
    --max-concurrent-dispatches=0 \
    --max-attempts=0 \
    --routing-override='service:py3,version:txtaskhandler'
gcloud tasks queues create test \
    --max-concurrent-dispatches=0 \
    --max-attempts=0
### setup for GKE
# create service account for our GKE clusters to use to access datastore, task
# queue, redis and stackdriver
gcloud iam service-accounts create forcloudrun
member="serviceAccount:forcloudrun@$PROJECTNAME.iam.gserviceaccount.com"
# One binding call per role the benchmark workloads need (folded into a loop
# instead of eight copy-pasted invocations).
for role in \
    roles/cloudtasks.enqueuer \
    roles/datastore.user \
    roles/storage.objectViewer \
    roles/spanner.databaseUser \
    roles/redis.editor \
    roles/logging.logWriter \
    roles/monitoring.metricWriter \
    roles/stackdriver.resourceMetadata.writer
do
    gcloud projects add-iam-policy-binding "$PROJECTNAME" \
        --member "$member" \
        --role "$role"
done
# need to be able to access builds in order to deploy them
gsutil iam ch "serviceAccount:forcloudrun@${PROJECTNAME}.iam.gserviceaccount.com:objectViewer" "gs://artifacts.${PROJECTNAME}.appspot.com"
gcloud iam service-accounts keys create \
    platforms/cloud_run/serviceaccount.json \
    --iam-account "forcloudrun@$PROJECTNAME.iam.gserviceaccount.com"
# enable GKE
gcloud components install kubectl --quiet
gcloud services enable container.googleapis.com
gcloud services enable cloudbuild.googleapis.com
gcloud services enable logging.googleapis.com
gcloud services enable monitoring.googleapis.com
gcloud services enable stackdriver.googleapis.com
# put our clusters in the same region and zone as our benchmarker
machineTypes=('n1-highcpu-2' 'n2-highcpu-2')
for machineType in "${machineTypes[@]}"; do
    # NOTE(review): this branch is dead with the current machineTypes array;
    # kept so c2-standard-4 can be re-added without touching the loop body.
    if [ "$machineType" == 'c2-standard-4' ]; then
        zone='us-central1-b' # not available in zone a yet
    else
        zone='us-central1-a'
    fi
    clusterName="cluster-$machineType"
    gcloud beta container clusters create "$clusterName" \
        --machine-type="$machineType" \
        --addons=HorizontalPodAutoscaling,HttpLoadBalancing,CloudRun \
        --scopes cloud-platform \
        --metadata disable-legacy-endpoints=true \
        --enable-ip-alias \
        --no-issue-client-certificate \
        --no-enable-basic-auth \
        --enable-autorepair \
        --enable-autoupgrade \
        --enable-stackdriver-kubernetes \
        --zone="$zone" \
        --enable-autoscaling \
        --min-nodes=0 \
        --max-nodes=100 \
        --num-nodes=3 \
        --service-account="forcloudrun@$PROJECTNAME.iam.gserviceaccount.com"
done
# clusters take some time to startup, so we create the clusters and then we try
# to get their IPs later
sleep 60
for machineType in "${machineTypes[@]}"; do
    # NOTE(review): dead branch with the current machineTypes array; kept in
    # sync with the zone choice made at cluster-creation time above.
    if [ "$machineType" == 'c2-standard-4' ]; then
        zone='us-central1-b' # not available in zone us-central1-a yet
    else
        zone='us-central1-a'
    fi
    clusterName="cluster-$machineType"
    # get the public IP address through which we can access our service
    kubectl get service istio-ingress --namespace gke-system \
        --cluster "gke_${PROJECTNAME}_${zone}_${clusterName}" \
        --output='jsonpath={.status.loadBalancer.ingress[0].ip}' \
        > "platforms/cloud_run/clusterip_${machineType}.txt"
    # don't need this (default) addon
    gcloud container clusters update "$clusterName" \
        --update-addons=KubernetesDashboard=DISABLED --zone="$zone"
done
# cloud run
gcloud services enable run.googleapis.com
# alpha required to be able to update cloud run service yamls
gcloud components install alpha --quiet
echo "Setting up spanner db"
gcloud services enable spanner.googleapis.com
gcloud spanner instances create default \
    --config=regional-us-central1 \
    --description="Default spanner instance" \
    --nodes=1
# BUGFIX: the --ddl line previously ended with a stray trailing backslash
# (after the closing quote), which swallowed the next command as extra
# arguments to this gcloud invocation.
gcloud spanner databases create default \
    --instance=default \
    --ddl="CREATE TABLE JSCounter (id STRING(256), count INT64) PRIMARY KEY(id); \
CREATE TABLE BigJsonHolder (id STRING(256), data STRING(2621440)) PRIMARY KEY(id); \
CREATE TABLE OneInt (id INT64) PRIMARY KEY(id); \
CREATE TABLE JSTxDoneSentinel (id STRING(256)) PRIMARY KEY(id);"
# Builds the SQL VALUES list "(N), (N+1), ..., (N+999)" for one batch of
# 1000 sequential ids, where N = batch * 1000. Writes the list to stdout.
build_oneint_values() {
    local batch=$1
    local vals="($((batch * 1000)))"
    local j
    for j in $(seq 1 999); do
        vals="$vals, ($((batch * 1000 + j)))"
    done
    printf '%s' "$vals"
}
echo "Populating spanner db tables..."
# Insert ids 0..9999 into OneInt, 1000 rows per DML statement.
for i in $(seq 0 9); do
    gcloud spanner databases execute-sql default \
        --instance=default \
        --sql="INSERT OneInt (id) VALUES $(build_oneint_values "$i")"
done
# Deploy every benchmark platform, then the benchmarker itself.
./platforms/deploy.py "$PROJECTNAME"
pushd benchmark
./deploy.sh
popd
echo "creating datastore entities for benchmarking ..."
# Seed 10,000 entities in batches of 1,000 via the py27 dbindir endpoint.
for start in $(seq 0 1000 9000); do
    if [ "$start" -ne 0 ]; then
        # progress report covering the batch that just finished
        echo " $start done"
    fi
    curl -d "s=$start&n=1000" -X POST "https://webapp-f1-solo-dbindir-dot-py27-dot-$PROJECTNAME.appspot.com/test/dbindir"
done
# (typo fix: was "entites")
echo ' done creating entities!'