# This file uses Bash Here Documents that have to be indented with tabs so we
# uniformly use tabs for unaligned indent and tabs followed by spaces for
# aligned indent.
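#
# For illustration only (not executed), a minimal sketch of the `<<-` behavior
# this convention relies on: the dash makes bash strip leading tabs, but not
# spaces, from every line of the here document, so tab-indented bodies arrive
# flush left:
#
#	cat <<- EOF
#		hello
#	EOF
#
# prints just `hello`, without any leading whitespace.
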
project_root="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
export project_root
# A little helper to make re-sourcing this script easier.
#
_refresh() {
	source "${project_root}/environment"
}

# Manage the symlink to the active deployment
#
_select() {
	if [ -z "$1" ] ; then
		_show_link
	else
		if ! {
			_validate_link "$1" &&
			_logout &&
			_link "$1" &&
			_refresh &&
			_login ;
		} ; then
			echo >&2 "_select failed"
			return 1
		fi
	fi
}

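# Example usage (the deployment name is hypothetical):
#
#	_select dev     # activate the 'dev' deployment
#	_select         # no argument: show the currently active deployment
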
_deselect() {
	if ! {
		_logout &&
		_unlink &&
		_refresh ;
	} ; then
		echo >&2 "_deselect failed"
		return 1
	fi
}

_show_link() {
	( cd "${project_root}/deployments" && ls -l .active )
}

_validate_link() {
	local d="${project_root}/deployments/$1"
	# -d dereferences symlinks, so we additionally use -L to make sure the
	# argument isn't itself a symlink
	if ! { [ ! -L "$d" ] && [ -d "$d" ] ; } ; then
		echo >&2 "_validate_link failed: '$1'"
		return 1
	fi
}

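# For example, `_validate_link dev` (hypothetical name) succeeds only if
# deployments/dev exists, is a directory and is not itself a symlink, which
# rules out passing the name of the .active link itself.
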
_link() {
	if ! (
		_validate_link "$1" &&
		cd "${project_root}/deployments" &&
		{ [ ! -e .active ] || { [ -L .active ] && rm .active ; } ; } &&
		ln -s "$1" .active
	) ; then
		echo >&2 "_link failed: '$1'"
		return 1
	fi
}

_unlink() {
	rm "${project_root}/deployments/.active"
}

_login() {
	if {
		_login_google &&
		_login_aws &&
		_login_docker_ecr &&
		_login_docker_gitlab ;
	} ; then
		echo >&2 \
			"Session credentials are in effect for AWS. Additionally, you have" \
			"been logged into Google Cloud, Amazon ECR and the GitLab Docker" \
			"registry. Use '_logout' to invalidate the session credentials." \
			"Alternatively, you can use '_logout_completely' to invalidate all" \
			"credentials, but this is usually not necessary."
	else
		echo >&2 "_login failed"
		return 1
	fi
}

_logout() {
	# Docker segregates credential state by registry and we maintain separate
	# registries (both ECR and GitLab) per deployment, so we don't need to log
	# out of those registries when switching deployments. Above, we offer
	# dedicated functions for explicitly logging out of those registries.
	_logout_aws
	# We segregate Google state by deployment and working copy (see
	# CLOUDSDK_CONFIG in environment.py) so we don't need to log out of Google
	# when switching deployments. Above, we offer a dedicated function for
	# explicitly logging out of Google.
}

_logout_completely() {
	# We don't use `&&` between function invocations because failing to log
	# out of one realm shouldn't prevent us from attempting to log out of the
	# others.
	_logout_google
	_logout
	_logout_docker_ecr
	_logout_docker_gitlab
}

_login_google() {
	if [ -n "$azul_google_user" ] ; then
		if ! {
			gcloud auth login --update-adc --quiet "$azul_google_user" &&
			gcloud config set project "$GOOGLE_PROJECT" &&
			gcloud auth application-default set-quota-project "$GOOGLE_PROJECT" ;
		} ; then
			echo >&2 "_login_google failed"
			return 1
		fi
	fi
}

_logout_google() {
	if [ -n "$azul_google_user" ] ; then
		if ! {
			gcloud auth application-default revoke --quiet &&
			gcloud auth revoke --quiet ;
		} ; then
			echo >&2 "_logout_google failed"
			return 1
		fi
	fi
}

# Get temporary credentials from STS via AssumeRole and inject them
# into the current environment where other AWS client libraries can
# find them.
#
# https://github.com/boto/boto3/issues/1179#issuecomment-569940530
#
_login_aws() {
	local env
	if ! env="$(
		python - <<- "EOF"
			from pathlib import Path

			import botocore.credentials
			import botocore.session
			import botocore.utils

			# Get the AssumeRole credential provider and make it the only one
			session = botocore.session.get_session()
			resolver = session.get_component('credential_provider')
			provider = resolver.get_provider('assume-role')
			resolver.providers = [provider]

			# Make the provider use the same cache as the AWS CLI
			cli_cache = Path('~', '.aws', 'cli', 'cache').expanduser()
			provider.cache = botocore.utils.JSONFileCache(cli_cache)

			# Request the credentials. If the CLI has cached credentials, this
			# step uses those. If not, fresh ones are requested from STS. If
			# that requires entering an MFA code, the user is prompted. The
			# fresh credentials are cached such that subsequent invocations
			# just use them until they expire. Setting the expiration window
			# to four hours ensures that cached credentials will be valid for
			# at least four more hours. If they are not, new credentials will
			# be requested from STS.
			#
			botocore.credentials.AssumeRoleCredentialFetcher.DEFAULT_EXPIRY_WINDOW_SECONDS = 4 * 60 * 60
			credentials = session.get_credentials()

			# Print the bash statements so they can be eval-ed
			print(f'export AWS_ACCESS_KEY_ID={credentials.access_key}')
			print(f'export AWS_SECRET_ACCESS_KEY={credentials.secret_key}')
			print(f'export AWS_SESSION_TOKEN={credentials.token}')
		EOF
	)" ; then
		echo >&2 "_login_aws failed"
		return 1
	fi
	eval "$env"
}

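# After a successful `_login_aws`, the session credentials can be
# sanity-checked with the AWS CLI, which picks up the exported variables:
#
#	aws sts get-caller-identity
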
_logout_aws() {
	unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
}

_login_docker_ecr() {
	if [ -n "${azul_docker_registry:+x}" ] ; then
		if ! (
			set -o pipefail
			aws ecr get-login-password --region us-east-1 |
			docker login \
				--username AWS \
				--password-stdin \
				"${azul_docker_registry%/}"
		) ; then
			echo >&2 "_login_docker_ecr failed"
			return 1
		fi
	fi
}

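# Note that `${azul_docker_registry%/}` strips a trailing slash, so the login
# works whether the variable ends in one or not. A value would look something
# like `123456789012.dkr.ecr.us-east-1.amazonaws.com/` (hypothetical account
# ID).
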
_logout_docker_ecr() {
	if [ -n "${azul_docker_registry:+x}" ] ; then
		if ! docker logout "${azul_docker_registry}" ; then
			echo >&2 "_logout_docker_ecr failed"
			return 1
		fi
	fi
}

_login_docker_gitlab() {
	if {
		[ -n "${azul_gitlab_access_token:+x}" ] &&
		[ -n "${azul_gitlab_user:+x}" ] ;
	} ; then
		if ! (
			set -o pipefail
			printenv azul_gitlab_access_token |
			docker login \
				--username "${azul_gitlab_user}" \
				--password-stdin \
				"docker.gitlab.${AZUL_DOMAIN_NAME}"
		) ; then
			echo >&2 "_login_docker_gitlab failed"
			return 1
		fi
	fi
}

_logout_docker_gitlab() {
	if {
		[ -n "${azul_gitlab_access_token:+x}" ] &&
		[ -n "${azul_gitlab_user:+x}" ] ;
	} ; then
		if ! docker logout "docker.gitlab.${AZUL_DOMAIN_NAME}" ; then
			echo >&2 "_logout_docker_gitlab failed"
			return 1
		fi
	fi
}

_revenv() {
	if ! {
		deactivate &&
		make virtualenv &&
		source .venv/bin/activate &&
		make requirements envhook ;
	} ; then
		echo >&2 "_revenv failed"
		return 1
	fi
}

_clone() {
	if [[ -z "$1" || -z "$2" ]] ; then
		echo "Need two arguments: the name of the deployment to select in the"
		echo "project clone and the name of the branch to check out in it."
		return 1
	fi
	if [ -n "$VIRTUAL_ENV" ] ; then
		echo "Run 'deactivate' first"
		return 2
	fi
	if [ "$(basename "$PWD")" != azul ] ; then
		echo "The name of the current project directory must be 'azul'"
		return 3
	fi
	local deployment="$1"
	local branch="$2"
	(
		set -e
		git worktree add "../azul.${deployment}" "${branch}"
		cd "../azul.${deployment}"
		(cd terraform/gitlab/vpn && git submodule update --init easy-rsa)
		_update_clone
		source environment
		_link "${deployment}"
		_refresh
		make virtualenv
		source .venv/bin/activate
		make requirements envhook
		deactivate
		rsync -av ../azul/.idea .
		mv .idea/azul.iml ".idea/azul.${deployment}.iml"
		sed -e '/<component name="ProjectId".*/d' \
		    -e s#\<module\ name=\"azul\"#\<module\ name=\""azul.${deployment}"\"#g \
		    -i '' \
		    .idea/workspace.xml
		sed -e s#.idea/azul.iml#.idea/azul."${deployment}".iml#g \
		    -i '' \
		    .idea/modules.xml
		echo ""
		echo "You can now open the directory"
		echo ""
		pwd
		echo ""
		echo "as a project in PyCharm. Be sure to verify that the right .venv"
		echo "is configured as the Python interpreter in the project settings."
		echo ""
		echo "To remove the clone, close the project in PyCharm, delete the"
		echo "project directory, and run 'git worktree prune'."
		echo ""
	)
}

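# Example (deployment and branch names are hypothetical):
#
#	cd ~/workspace/azul          # the main working copy, must be named 'azul'
#	deactivate                   # only if a virtualenv is currently active
#	_clone dev issues/fix-foo    # creates ../azul.dev on branch issues/fix-foo
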
_update_clone() {
	# Copy local-only configuration from the main working copy into the
	# current clone. The rsync filters include every directory (so that the
	# file patterns below can match at any depth), then the local environment
	# files, and exclude everything else; -m prunes directories that end up
	# empty.
	rsync -rvlm \
	      -f '+ */' \
	      -f '+ environment.local.py' \
	      -f '+ /deployments/*.local/environment*.py' \
	      -f '- *' \
	      ../azul/ \
	      .
}

# Destroy the most expensive resources in a main deployment and its components.
# Compared to complete destruction, hibernation has the advantage of needing
# less time to come back from, and not requiring incrementing the incarnation
# counter or adding service accounts to Terra groups. This function was written
# from memory. The `terraform` commands were tested individually but the
# function as a whole was not.
#
_hibernate() {
	# shellcheck disable=SC2154
	if test -z "$azul_terraform_component" ; then
		make -C lambdas && {
			cd terraform &&
			make validate && {
				terraform destroy \
					-target aws_cloudwatch_metric_alarm.indexercachehealth \
					-target aws_cloudwatch_metric_alarm.servicecachehealth \
					-target aws_elasticsearch_domain.index
			} && {
				cd shared &&
				_select "$AZUL_DEPLOYMENT_STAGE.shared" &&
				make validate &&
				terraform destroy \
					-target aws_cloudwatch_metric_alarm.clamscan \
					-target aws_cloudwatch_metric_alarm.freshclam
			} && {
				# The previous block left us in the shared component's
				# directory; the gitlab component is its sibling
				cd ../gitlab &&
				_select "$AZUL_DEPLOYMENT_STAGE.gitlab" &&
				make validate &&
				terraform destroy \
					-target aws_cloudwatch_metric_alarm.gitlab_cpu_use \
					-target aws_cloudwatch_metric_alarm.gitlab_data_disk_use \
					-target aws_cloudwatch_metric_alarm.gitlab_root_disk_use \
					-target aws_ec2_client_vpn_endpoint.gitlab \
					-target aws_instance.gitlab \
					-target aws_nat_gateway.gitlab_0 \
					-target aws_nat_gateway.gitlab_1 \
					-target aws_lb.gitlab_nlb \
					-target aws_lb.gitlab_alb
			}
		}
	else
		echo >&2 "Must have the main component selected"
		return 1
	fi
}

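# Usage sketch (as untested as the function itself): activate the main
# deployment, e.g. `_select dev`, then run `_hibernate`. To wake the
# deployment up again, re-apply the Terraform configuration of each component
# that was partially destroyed above.
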
# We disable `envhook.py` to avoid redundancy. The `envhook.py` script imports
# `export_environment.py`, too. We could also pass -S to `python3` but that
# causes problems on Travis (`importlib.util` failing to import `contextlib`).
#
eval "$(ENVHOOK=0 python3 "${project_root}/scripts/export_environment.py" || echo false)"
# Auto-completion for _select
#
_complete_env() {
	# https://stuff-things.net/2016/05/11/bash-autocompletion/
	local env envs
	env="${COMP_WORDS[COMP_CWORD]}"
	# * because of https://unix.stackexchange.com/a/537241/448602
	envs=$(find "${project_root}/deployments" -maxdepth 1 -type d -print0 | xargs -0 basename)
	# shellcheck disable=SC2207
	COMPREPLY=($(compgen -W "$envs" -- "${env}"))
	return 0
}

complete -F _complete_env _select _link _validate_link
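
# With the above in place, pressing Tab after typing, say, `_select de`
# completes from the names of the directories under deployments/, e.g. `dev`
# or `dev.shared` (hypothetical names).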