diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..3746fd1 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,15 @@ +{ + "name": "AWS CDK & Python Development Environment", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-22.04", + "features": { + "ghcr.io/devcontainers/features/node:1.5.0": { + "version": "22.6.0" + }, + "ghcr.io/devcontainers/features/python:1.6.3": { + "version": "3.12.0" + }, + "ghcr.io/devcontainers/features/aws-cli:1": {} + }, + "postCreateCommand": "./tools/setup.sh", + "shutdownAction": "stopContainer" +} diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..605fd1b --- /dev/null +++ b/.flake8 @@ -0,0 +1,16 @@ +[flake8] +exclude = + .git, + __pycache__, + build, + dist, + .tox, + venv, + .venv, + .pytest_cache +max-complexity = 12 +#per-file-ignores = +# docs/_api/conf.py: E265 +# integration-tests/steps/*: E501,F811,F403,F405 +extend-ignore = E203 +max-line-length = 120 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..3ff6571 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @Sage-Bionetworks-IT/sagebio-it @Sage-Bionetworks-IT/infra-oversight-committee diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..ffe15eb --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,7 @@ +DELETE THIS TEMPLATE BEFORE SUBMITTING + +PR Checklist: +[ ] Clearly explain your change with a descriptive commit message + +[ ] Setup pre-commit and run the validators (info in README.md) + To validate files run: `pre-commit run --all-files` diff --git a/.github/workflows/aws-deploy.yaml b/.github/workflows/aws-deploy.yaml new file mode 100644 index 0000000..08334ca --- /dev/null +++ b/.github/workflows/aws-deploy.yaml @@ -0,0 +1,53 @@ +# reusable template for deployments to AWS accounts +name: aws-deploy + +# Ensures that only one deploy task per 
branch/environment will run at a time. +concurrency: + group: ${{ inputs.environment }} + cancel-in-progress: false + +on: + workflow_call: + inputs: + aws-region: + type: string + default: us-east-1 + role-to-assume: + required: true + type: string + role-session-name: + required: true + type: string + role-duration-seconds: + type: number + default: 3600 + environment: + required: true + type: string + +jobs: + deploy: + permissions: + id-token: write + contents: read + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install AWS CLI + run: sudo snap install aws-cli --classic + - name: Install AWS CDK CLI + run: npm install -g aws-cdk + - name: Install python dependencies + run: pip install -r requirements.txt -r requirements-dev.txt + - name: Assume AWS Role + uses: aws-actions/configure-aws-credentials@v2 + with: + aws-region: ${{ inputs.aws-region }} + role-to-assume: ${{ inputs.role-to-assume }} + role-session-name: ${{ inputs.role-session-name }} + role-duration-seconds: ${{ inputs.role-duration-seconds }} + - name: CDK deploy + run: cdk deploy --all --concurrency 5 --require-approval never + env: + ENV: ${{ inputs.environment }} diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml new file mode 100644 index 0000000..8b08013 --- /dev/null +++ b/.github/workflows/check.yml @@ -0,0 +1,34 @@ +name: check + +on: + pull_request: + branches: ['*'] + push: + branches: ['*'] + +jobs: + unit-tests: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install dependencies + run: pip install -r requirements.txt -r requirements-dev.txt + - name: Run unit tests + run: python -m pytest tests/ -s -v + synth: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install dependencies + run: pip install -r requirements.txt -r requirements-dev.txt + - name: Generate cloudformation + uses: 
youyo/aws-cdk-github-actions@v2 + env: + ENV: dev + with: + cdk_subcommand: 'synth' + actions_comment: false + debug_log: true + cdk_args: '--output ./cdk.out' diff --git a/.github/workflows/deploy-dev.yaml b/.github/workflows/deploy-dev.yaml new file mode 100644 index 0000000..7016982 --- /dev/null +++ b/.github/workflows/deploy-dev.yaml @@ -0,0 +1,18 @@ +name: deploy-dev + +on: + workflow_run: + workflows: + - check + types: + - completed + branches: + - dev + +jobs: + aws-deploy: + uses: "./.github/workflows/aws-deploy.yaml" + with: + role-to-assume: "arn:aws:iam::631692904429:role/sagebase-github-oidc-sage-bionetworks-it-schematic-infra-v2" + role-session-name: ${{ github.repository_owner }}-${{ github.event.repository.name }}-${{ github.run_id }} + environment: dev diff --git a/.github/workflows/deploy-prod.yaml b/.github/workflows/deploy-prod.yaml new file mode 100644 index 0000000..14a38aa --- /dev/null +++ b/.github/workflows/deploy-prod.yaml @@ -0,0 +1,18 @@ +name: deploy-prod + +on: + workflow_run: + workflows: + - check + types: + - completed + branches: + - prod + +jobs: + aws-deploy: + uses: "./.github/workflows/aws-deploy.yaml" + with: + role-to-assume: "arn:aws:iam::878654265857:role/sagebase-github-oidc-sage-bionetworks-it-schematic-infra-v2" + role-session-name: ${{ github.repository_owner }}-${{ github.event.repository.name }}-${{ github.run_id }} + environment: prod diff --git a/.github/workflows/deploy-stage.yaml b/.github/workflows/deploy-stage.yaml new file mode 100644 index 0000000..97b8069 --- /dev/null +++ b/.github/workflows/deploy-stage.yaml @@ -0,0 +1,18 @@ +name: deploy-stage + +on: + workflow_run: + workflows: + - check + types: + - completed + branches: + - stage + +jobs: + aws-deploy: + uses: "./.github/workflows/aws-deploy.yaml" + with: + role-to-assume: "arn:aws:iam::878654265857:role/sagebase-github-oidc-sage-bionetworks-it-schematic-infra-v2" + role-session-name: ${{ github.repository_owner }}-${{ github.event.repository.name 
}}-${{ github.run_id }} + environment: stage diff --git a/.gitignore b/.gitignore index 82f9275..465847b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,30 +1,14 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST +*.swp +package-lock.json +__pycache__ +.pytest_cache +.venv +*.egg-info + +# CDK asset staging directory +.cdk.staging +cdk.out + # PyInstaller # Usually these files are written by a python script from a template @@ -39,17 +23,14 @@ pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ -.nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover -*.py,cover .hypothesis/ .pytest_cache/ -cover/ # Translations *.mo @@ -59,7 +40,6 @@ cover/ *.log local_settings.py db.sqlite3 -db.sqlite3-journal # Flask stuff: instance/ @@ -72,51 +52,16 @@ instance/ docs/_build/ # PyBuilder -.pybuilder/ target/ # Jupyter Notebook .ipynb_checkpoints -# IPython -profile_default/ -ipython_config.py - # pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version +.python-version -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. 
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/latest/usage/project/#working-with-version-control -.pdm.toml -.pdm-python -.pdm-build/ - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff +# celery beat schedule file celerybeat-schedule -celerybeat.pid # SageMath parsed files *.sage.py @@ -142,21 +87,33 @@ venv.bak/ # mypy .mypy_cache/ -.dmypy.json -dmypy.json -# Pyre type checker -.pyre/ +.idea/ +git-crypt.key + +# Elastic Beanstalk Files +.elasticbeanstalk/* +!.elasticbeanstalk/*.cfg.yml +!.elasticbeanstalk/*.global.yml -# pytype static type analyzer -.pytype/ +# sceptre remote templates +templates/remote/ + +# lambda artifacts +lambdas/*.zip + +# MAC Crap +.DS_Store + +# temp files +temp/ + +# pipenv +Pipfile* -# Cython debug symbols -cython_debug/ +# npm +node_modules/ -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
-#.idea/ +# sceptre +sceptre/**/templates/remote/ +.dump/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..32b7a9f --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,42 @@ +default_language_version: + python: python3 + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: end-of-file-fixer + - id: mixed-line-ending + - id: trailing-whitespace + - repo: https://github.com/PyCQA/flake8 + rev: 7.1.1 + hooks: + - id: flake8 + - repo: https://github.com/adrienverge/yamllint + rev: v1.35.1 + hooks: + - id: yamllint + - repo: https://github.com/awslabs/cfn-python-lint + rev: v1.19.0 + hooks: + - id: cfn-python-lint + args: + - "-i=E1001" + exclude: | + (?x)( + ^.venv/| + ^tests/| + ^docker/| + ^temp/| + ^.github/| + ^.pre-commit-config.yaml + ) + - repo: https://github.com/psf/black + rev: 24.10.0 + hooks: + - id: black + - repo: https://github.com/sirosen/check-jsonschema + rev: 0.29.4 + hooks: + - id: check-github-workflows + - id: check-github-actions diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000..b83bc42 --- /dev/null +++ b/.yamllint @@ -0,0 +1,27 @@ +--- + +extends: default + +rules: + braces: + level: warning + max-spaces-inside: 1 + brackets: + level: warning + max-spaces-inside: 1 + commas: + level: warning + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + level: warning + hyphens: + level: warning + indentation: + level: warning + indent-sequences: consistent + line-length: disable + truthy: disable + new-line-at-end-of-file: + level: warning diff --git a/README.md b/README.md index de746e1..a186287 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,284 @@ -# agora-infra-v3 -Project to deploy Agora to AWS + +# AWS CDK app + +AWS CDK app for deploying Agora. + +# Prerequisites + +AWS CDK projects require some bootstrapping before synthesis or deployment. 
+Please review the [bootstrapping documentation](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html#getting_started_bootstrap) +before development. + +# Dev Container + +This repository provides a [dev container](https://containers.dev/) that includes all the tools +required to develop this AWS CDK app. + +## Opening the project inside its dev container + +With VS Code: + +1. Clone this repo +2. File > Open Folder... +3. A prompt should invite you to open the project inside the dev container. If not, open VS Code + Command Palette and select "Dev Containers: Open Folder in Container..." + +With GitHub Codespaces: + +1. From the main page of this repository, click on the button "Code" > Codespaces > Click on the + button "Create codespace" + +That's it! You are now inside the dev container and have access to all the development tools. + +# Development + +All the development tools are provided when developing inside the dev container +(see above). These tools include Python, AWS CLI, AWS CDK CLI, etc. These tools +also include a Python virtual environment where all the Python packages needed +are already installed. + +If you decide to develop outside of the dev container, some of the development +tools can be installed by running: + +```console +./tools/setup.sh +``` + +Development requires the activation of the Python virtual environment: + +``` +$ source .venv/bin/activate +``` + +At this point you can now synthesize the CloudFormation template for this code. + +``` +$ cdk synth +``` + +To add additional dependencies, for example other CDK libraries, just add +them to your `setup.py` file and rerun the `pip install -r requirements.txt` +command.
+ + ## Useful commands + + * `cdk ls` list all stacks in the app + * `cdk synth` emits the synthesized CloudFormation template + * `cdk deploy` deploy this stack to your default AWS account/region + * `cdk diff` compare deployed stack with current state + * `cdk docs` open CDK documentation + + +# Testing + +## Static Analysis + +As a pre-deployment step we syntactically validate the CDK json, yaml and +python files with [pre-commit](https://pre-commit.com). + +Please install pre-commit, once installed the file validations will +automatically run on every commit. Alternatively you can manually +execute the validations by running `pre-commit run --all-files`. + +Verify CDK to Cloudformation conversion by running [cdk synth]: + +```console +ENV=dev cdk synth +``` + +The Cloudformation output is saved to the `cdk.out` folder + +## Unit Tests + +Tests are available in the tests folder. Execute the following to run tests: + +``` +python -m pytest tests/ -s -v +``` + + +# Environments + +An `ENV` environment variable must be set when running the `cdk` command to tell the +CDK which environment's variables to use when synthesising or deploying the stacks. + +Set environment variables for each environment in the [app.py](./app.py) file: + +```python +environment_variables = { + "VPC_CIDR": "10.254.192.0/24", + "FQDN": "dev.app.io", + "CERTIFICATE_ARN": "arn:aws:acm:us-east-1:XXXXXXXXXXX:certificate/0e9682f6-3ffa-46fb-9671-b6349f5164d6", + "TAGS": {"CostCenter": "NO PROGRAM / 000000"}, +} +``` + +For example, synthesis with the `prod` environment variables: + +```console +ENV=prod cdk synth +``` + +# Certificates + +Certificates to set up HTTPS connections should be created manually in AWS certificate manager. +This is not automated due to AWS requiring manual verification of the domain ownership. +Once created take the ARN of the certificate and set that ARN in environment_variables.
+ +![ACM certificate](docs/acm-certificate.png) + +# Secrets + +Secrets can be manually created in the +[AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/create_secret.html). +When naming your secret make sure that the secret does not end in a pattern that matches +`-??????`, this will cause issues with how AWS CDK looks up secrets. + +To pass secrets to a container set the secrets manager `container_secrets` +when creating a `ServiceProp` object. You'll be creating a list of `ServiceSecret` objects: +```python +from src.service_props import ServiceProps, ServiceSecret + +app_service_props = ServiceProps( + container_name="app", + container_port=443, + container_memory=1024, + container_location="ghcr.io/sage-bionetworks/app:v1.0", + container_secrets=[ + ServiceSecret( + secret_name="app/dev/DATABASE", + environment_key="NAME_OF_ENVIRONMENT_VARIABLE_SET_FOR_CONTAINER", + ), + ServiceSecret( + secret_name="app/dev/PASSWORD", + environment_key="SINGLE_VALUE_SECRET", + ) + ] +) +``` + +For example, the KVs for `app/dev/DATABASE` could be: +```json +{ + "DATABASE_USER": "maria", + "DATABASE_PASSWORD": "password" +} +``` + +And the value for `app/dev/PASSWORD` could be: `password` + +In the application (Python) code the secrets may be loaded into a dict using code like: + +```python +import json +import os + +all_secrets_dict = json.loads(os.environ["NAME_OF_ENVIRONMENT_VARIABLE_SET_FOR_CONTAINER"]) +``` + +In the case of a single value you may load the value like: + +```python +import os + +my_secret = os.environ.get("SINGLE_VALUE_SECRET", None) +``` + + +> [!NOTE] +> Retrieving secrets requires access to the AWS Secrets Manager + +# Deployment + +## Bootstrap + +There are a few items that need to be manually bootstrapped before deploying the application. 
+ +* Add secrets to the AWS Secrets Manager +* Create an [ACM certificate for the application](#Certificates) using the AWS Certificates Manager +* Update environment_variables in [app.py](app.py) with variables specific to each environment. +* Update references to the docker images in [app.py](app.py) + (i.e. `ghcr.io/sage-bionetworks/app-xxx:`) +* (Optional) Update the `ServiceProps` objects in [app.py](app.py) with parameters specific to + each container. + +## Login with the AWS CLI + +> [!NOTE] +> This and the following sections assume that you are working in the AWS account +> `org-sagebase-itsandbox` with the role `Developer` and that you are deploying +> to the `us-east-1` region. If this assumption is correct, you should be able +> to simply copy-paste the following commands, otherwise adapting the +> configuration should be straightforward. + +Create the config file if it doesn't exist yet. + +```console +mkdir ~/.aws && touch ~/.aws/config +``` + +As a Developer working in Sage IT Sandbox AWS account, add the following profile to the config file.
+ +```ini +[profile itsandbox-dev] +sso_start_url = https://d-906769aa66.awsapps.com/start +sso_region = us-east-1 +sso_account_id = XXXXXXXXX +sso_role_name = Developer +``` + +Login with the AWS CLI: + +```console +aws --profile itsandbox-dev sso login +``` + + +## Deploy + +Deployment requires setting up an [AWS profile](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-quickstart.html) +then executing the following command: + +```console +AWS_PROFILE=itsandbox-dev AWS_DEFAULT_REGION=us-east-1 ENV=dev cdk deploy --all +``` + +## Force new deployment + +```console +AWS_PROFILE=itsandbox-dev AWS_DEFAULT_REGION=us-east-1 aws ecs update-service \ + --cluster \ + --service \ + --force-new-deployment +``` + +# Execute a command from a container running on ECS + +Once a container has been deployed successfully it is accessible for debugging using the +[ECS execute-command](https://docs.aws.amazon.com/cli/latest/reference/ecs/execute-command.html) + +Example to get an interactive shell run into a container: + +```console +AWS_PROFILE=itsandbox-dev AWS_DEFAULT_REGION=us-east-1 aws ecs execute-command \ + --cluster AppEcs-ClusterEB0386A7-BygXkQgSvdjY \ + --task a2916461f65747f390fd3e29f1b387d8 \ + --container app-mariadb \ + --command "/bin/sh" --interactive +``` + + +# CI Workflow + +This repo has been set up to use Github Actions CI to continuously deploy the application. + +The workflow for continuous integration: + +* Create PR from the git dev branch +* PR is reviewed and approved +* PR is merged +* CI deploys changes to the dev environment (dev.app.io) in the AWS dev account. +* Changes are promoted (or merged) to the git stage branch. +* CI deploys changes to the staging environment (stage.app.io) in the AWS prod account. +* Changes are promoted (or merged) to the git prod branch. +* CI deploys changes to the prod environment (prod.app.io) in the AWS prod account. 
diff --git a/app.py b/app.py new file mode 100644 index 0000000..ffd8e91 --- /dev/null +++ b/app.py @@ -0,0 +1,213 @@ +from os import environ + +import aws_cdk as cdk + +from src.ecs_stack import EcsStack +from src.load_balancer_stack import LoadBalancerStack +from src.network_stack import NetworkStack +from src.service_props import ServiceProps, ContainerVolume +from src.service_stack import LoadBalancedServiceStack, ServiceStack + +# get the environment and set environment specific variables +VALID_ENVIRONMENTS = ["dev", "stage", "prod"] +environment = environ.get("ENV") +match environment: + case "prod": + environment_variables = { + "VPC_CIDR": "10.254.174.0/24", + "FQDN": "prod.agora.io", + "CERTIFICATE_ARN": "arn:aws:acm:us-east-1:681175625864:certificate/69b3ba97-b382-4648-8f94-a250b77b4994", + "TAGS": {"CostCenter": "NO PROGRAM / 000000"}, + } + case "stage": + environment_variables = { + "VPC_CIDR": "10.254.173.0/24", + "FQDN": "stage.agora.io", + "CERTIFICATE_ARN": "arn:aws:acm:us-east-1:681175625864:certificate/69b3ba97-b382-4648-8f94-a250b77b4994", + "TAGS": {"CostCenter": "NO PROGRAM / 000000"}, + } + case "dev": + environment_variables = { + "VPC_CIDR": "10.254.172.0/24", + "FQDN": "dev.agora.io", + "CERTIFICATE_ARN": "arn:aws:acm:us-east-1:607346494281:certificate/e8093404-7db1-4042-90d0-01eb5bde1ffc", + "TAGS": {"CostCenter": "NO PROGRAM / 000000"}, + } + case _: + valid_envs_str = ",".join(VALID_ENVIRONMENTS) + raise SystemExit( + f"Must set environment variable `ENV` to one of {valid_envs_str}. Currently set to {environment}." 
+ ) + +stack_name_prefix = f"agora-{environment}" +fully_qualified_domain_name = environment_variables["FQDN"] +environment_tags = environment_variables["TAGS"] +agora_version = "edge" + +# Define stacks +cdk_app = cdk.App() + +# recursively apply tags to all stack resources +if environment_tags: + for key, value in environment_tags.items(): + cdk.Tags.of(cdk_app).add(key, value) + +network_stack = NetworkStack( + scope=cdk_app, + construct_id=f"{stack_name_prefix}-network", + vpc_cidr=environment_variables["VPC_CIDR"], +) + +ecs_stack = EcsStack( + scope=cdk_app, + construct_id=f"{stack_name_prefix}-ecs", + vpc=network_stack.vpc, + namespace=fully_qualified_domain_name, +) + +# From AWS docs https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect-concepts-deploy.html +# The public discovery and reachability should be created last by AWS CloudFormation, including the frontend +# client service. The services need to be created in this order to prevent a time period when the frontend +# client service is running and available to the public, but a backend isn't.
+load_balancer_stack = LoadBalancerStack( + scope=cdk_app, + construct_id=f"{stack_name_prefix}-load-balancer", + vpc=network_stack.vpc, +) + +api_docs_props = ServiceProps( + container_name="agora-api-docs", + container_location=f"ghcr.io/sage-bionetworks/agora-api-docs:{agora_version}", + container_port=8010, + container_memory=200, + container_env_vars={"PORT": "8010"}, +) +api_docs_stack = ServiceStack( + scope=cdk_app, + construct_id=f"{stack_name_prefix}-api-docs", + vpc=network_stack.vpc, + cluster=ecs_stack.cluster, + props=api_docs_props, +) + +mongo_props = ServiceProps( + container_name="agora-mongo", + container_location=f"ghcr.io/sage-bionetworks/agora-mongo:{agora_version}", + container_port=27017, + container_memory=500, + container_env_vars={ + "MONGO_INITDB_ROOT_USERNAME": "root", + "MONGO_INITDB_ROOT_PASSWORD": "changeme", + "MONGO_INITDB_DATABASE": "agora", + }, + container_volumes=[ + ContainerVolume( + path="/data/db", + size=30, + ) + ], +) +mongo_stack = ServiceStack( + scope=cdk_app, + construct_id=f"{stack_name_prefix}-mongo", + vpc=network_stack.vpc, + cluster=ecs_stack.cluster, + props=mongo_props, +) + +# It is probably not appropriate host this container in ECS +# data_props = ServiceProps( +# container_name="agora-data", +# container_location=f"ghcr.io/sage-bionetworks/agora-data:{agora_version}", +# container_port=9999, # Not used +# container_memory=2048, +# ) +# data_stack = ServiceStack( +# scope=cdk_app, +# construct_id=f"{stack_name_prefix}-data", +# vpc=network_stack.vpc, +# cluster=ecs_stack.cluster, +# props=data_props, +# container_env_vars={ +# "DB_USER": "root", +# "DB_PASS": "changeme", +# "DB_NAME": "agora", +# "DB_PORT": "27017", +# "DB_HOST": "agora-mongo", +# "DATA_FILE": "syn13363290", +# "DATA_VERSION": "68", +# "TEAM_IMAGES_ID": "syn12861877", +# "SYNAPSE_AUTH_TOKEN": "agora-service-user-pat-here", +# }, +# ) +# data_stack.add_dependency(mongo_stack) + +api_props = ServiceProps( + container_name="agora-api", + 
container_location=f"ghcr.io/sage-bionetworks/agora-api:{agora_version}", + container_port=3333, + container_memory=1024, + container_env_vars={ + "MONGODB_URI": "mongodb://root:changeme@agora-mongo:27017/agora?authSource=admin", + "NODE_ENV": "development", + }, +) +api_stack = ServiceStack( + scope=cdk_app, + construct_id=f"{stack_name_prefix}-api", + vpc=network_stack.vpc, + cluster=ecs_stack.cluster, + props=api_props, +) +api_stack.add_dependency(mongo_stack) + +app_props = ServiceProps( + container_name="agora-app", + container_location=f"ghcr.io/sage-bionetworks/agora-app:{agora_version}", + container_port=4200, + container_memory=200, + container_env_vars={ + "API_DOCS_URL": f"http://{fully_qualified_domain_name}/api-docs", + "APP_VERSION": f"{agora_version}", + "CSR_API_URL": f"http://{fully_qualified_domain_name}/api/v1", + "SSR_API_URL": "http://agora-api:3333/v1", + }, +) +app_stack = ServiceStack( + scope=cdk_app, + construct_id=f"{stack_name_prefix}-app", + vpc=network_stack.vpc, + cluster=ecs_stack.cluster, + props=app_props, +) +app_stack.add_dependency(api_stack) + +apex_props = ServiceProps( + container_name="agora-apex", + container_location=f"ghcr.io/sage-bionetworks/agora-apex:{agora_version}", + container_port=80, + container_memory=200, + container_env_vars={ + "API_DOCS_HOST": "agora-api-docs", + "API_DOCS_PORT": "8010", + "API_HOST": "agora-api", + "API_PORT": "3333", + "APP_HOST": "agora-app", + "APP_PORT": "4200", + }, +) +apex_stack = LoadBalancedServiceStack( + scope=cdk_app, + construct_id=f"{stack_name_prefix}-apex", + vpc=network_stack.vpc, + cluster=ecs_stack.cluster, + props=apex_props, + load_balancer=load_balancer_stack.alb, + certificate_arn=environment_variables["CERTIFICATE_ARN"], + health_check_path="/health", +) +apex_stack.add_dependency(app_stack) +apex_stack.add_dependency(api_docs_stack) +apex_stack.add_dependency(api_stack) + +cdk_app.synth() diff --git a/cdk.json b/cdk.json new file mode 100644 index 0000000..e86570a 
--- /dev/null +++ b/cdk.json @@ -0,0 +1,66 @@ +{ + "app": "python3 app.py", + "watch": { + "include": [ + "**" + ], + "exclude": [ + "README.md", + "cdk*.json", + "requirements*.txt", + "**/__init__.py", + "**/__pycache__", + "tests" + ] + }, + "context": { + "@aws-cdk/aws-lambda:recognizeLayerVersion": true, + "@aws-cdk/core:checkSecretUsage": true, + "@aws-cdk/core:target-partitions": [ + "aws", + "aws-cn" + ], + "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, + "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, + "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, + "@aws-cdk/aws-iam:minimizePolicies": true, + "@aws-cdk/core:validateSnapshotRemovalPolicy": true, + "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, + "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, + "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, + "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, + "@aws-cdk/core:enablePartitionLiterals": true, + "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, + "@aws-cdk/aws-iam:standardizedServicePrincipals": true, + "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, + "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, + "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, + "@aws-cdk/aws-route53-patters:useCertificate": true, + "@aws-cdk/customresources:installLatestAwsSdkDefault": false, + "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, + "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, + "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, + "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, + "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, + "@aws-cdk/aws-redshift:columnId": true, + "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, + "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, + 
"@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, + "@aws-cdk/aws-kms:aliasNameRef": true, + "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true, + "@aws-cdk/core:includePrefixInUniqueNameGeneration": true, + "@aws-cdk/aws-efs:denyAnonymousAccess": true, + "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true, + "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true, + "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true, + "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true, + "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true, + "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true, + "@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForCodeCommitSource": true, + "@aws-cdk/aws-cloudwatch-actions:changeLambdaPermissionLogicalIdForLambdaAction": true, + "@aws-cdk/aws-codepipeline:crossAccountKeysDefaultValueToFalse": true, + "@aws-cdk/aws-codepipeline:defaultPipelineTypeToV2": true, + "@aws-cdk/aws-kms:reduceCrossAccountRegionPolicyScope": true, + "@aws-cdk/aws-eks:nodegroupNameAttribute": true + } +} diff --git a/docs/acm-certificate.png b/docs/acm-certificate.png new file mode 100644 index 0000000..343557f Binary files /dev/null and b/docs/acm-certificate.png differ diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..1f2bf77 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,2 @@ +pre-commit~=3.8.0 +pytest==6.2.5 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..92c10ed --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +aws-cdk-lib==2.139.0 +constructs>=10.0.0,<11.0.0 +boto3>=1.34.1 diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/ecs_stack.py b/src/ecs_stack.py new file mode 100644 index 0000000..8ac821f --- /dev/null +++ b/src/ecs_stack.py @@ -0,0 +1,34 @@ +import aws_cdk as cdk + +from 
aws_cdk import ( + aws_ecs as ecs, + aws_ec2 as ec2, +) + +from constructs import Construct + + +class EcsStack(cdk.Stack): + """ + ECS cluster + """ + + def __init__( + self, + scope: Construct, + construct_id: str, + vpc: ec2.Vpc, + namespace: str, + **kwargs + ) -> None: + super().__init__(scope, construct_id, **kwargs) + + self.cluster = ecs.Cluster( + self, + "Cluster", + vpc=vpc, + default_cloud_map_namespace=ecs.CloudMapNamespaceOptions( + name=namespace, + use_for_service_connect=True, + ), + ) diff --git a/src/load_balancer_stack.py b/src/load_balancer_stack.py new file mode 100644 index 0000000..b9061ad --- /dev/null +++ b/src/load_balancer_stack.py @@ -0,0 +1,24 @@ +import aws_cdk as cdk + +from aws_cdk import ( + aws_ec2 as ec2, + aws_elasticloadbalancingv2 as elbv2, +) + +from constructs import Construct + + +class LoadBalancerStack(cdk.Stack): + """ + API Gateway to allow access to ECS app from the internet + """ + + def __init__( + self, scope: Construct, construct_id: str, vpc: ec2.Vpc, **kwargs + ) -> None: + super().__init__(scope, construct_id, **kwargs) + + self.alb = elbv2.ApplicationLoadBalancer( + self, "AppLoadBalancer", vpc=vpc, internet_facing=True + ) + cdk.CfnOutput(self, "dns", value=self.alb.load_balancer_dns_name) diff --git a/src/network_stack.py b/src/network_stack.py new file mode 100644 index 0000000..3ad19a5 --- /dev/null +++ b/src/network_stack.py @@ -0,0 +1,21 @@ +import aws_cdk as cdk + +from aws_cdk import aws_ec2 as ec2 + +from constructs import Construct + + +class NetworkStack(cdk.Stack): + """ + Network for applications + """ + + def __init__(self, scope: Construct, construct_id: str, vpc_cidr, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # ------------------- + # create a VPC + # ------------------- + self.vpc = ec2.Vpc( + self, "Vpc", max_azs=2, ip_addresses=ec2.IpAddresses.cidr(vpc_cidr) + ) diff --git a/src/service_props.py b/src/service_props.py new file mode 100644 index 0000000..496fe8d 
# src/service_props.py
from dataclasses import dataclass
from typing import List, Optional, Sequence

from aws_cdk import aws_ecs as ecs

# URI-style scheme marking a container that must be built from local source
# (e.g. "path://docker/MyContainer") instead of pulled from a registry.
CONTAINER_LOCATION_PATH_ID = "path://"


@dataclass
class ServiceSecret:
    """
    Holds onto configuration for the secrets to be used in the container.

    Attributes:
        secret_name: The name of the secret as stored in the AWS Secrets Manager.
        environment_key: The name of the environment variable to be set within the container.
    """

    secret_name: str
    """The name of the secret as stored in the AWS Secrets Manager."""

    environment_key: str
    """The name of the environment variable to be set within the container."""


@dataclass
class ContainerVolume:
    """
    Holds onto configuration for a volume used in the container.

    Attributes:
        path: The path on the container to mount the host volume at.
        size: The size of the volume in GiB.
        read_only: Container has read-only access to the volume, set to `False` for write access.
    """

    path: str
    """The path on the container to mount the host volume at."""

    size: int = 15
    """The size of the volume in GiB."""

    read_only: bool = False
    """Container has read-only access to the volume, set to `False` for write access."""


class ServiceProps:
    """
    ECS service properties.

    container_name: the name of the container
    container_location:
        supports "path://" for building the container from local source
        (i.e. path://docker/MyContainer)
        supports docker registry references (i.e. ghcr.io/sage-bionetworks/app:latest)
    container_port: the container application port
    container_memory: the container application memory (MiB)
    container_env_vars: a dictionary of environment variables to pass into the
        container, i.e. {"EnvA": "EnvValueA", "EnvB": "EnvValueB"}
    container_secrets: List of `ServiceSecret` resources to pull from AWS Secrets Manager
    container_volumes: List of `ContainerVolume` resources to mount into the container
    auto_scale_min_capacity: the fargate auto scaling minimum capacity
    auto_scale_max_capacity: the fargate auto scaling maximum capacity
    container_command: Optional commands to run during the container startup
    container_healthcheck: Optional health check configuration for the container
    """

    def __init__(
        self,
        container_name: str,
        container_location: str,
        container_port: int,
        container_memory: int = 512,
        container_env_vars: dict = None,
        container_secrets: List[ServiceSecret] = None,
        container_volumes: List[ContainerVolume] = None,
        auto_scale_min_capacity: int = 1,
        auto_scale_max_capacity: int = 1,
        container_command: Optional[Sequence[str]] = None,
        container_healthcheck: Optional[ecs.HealthCheck] = None,
    ) -> None:
        self.container_name = container_name
        self.container_port = container_port
        self.container_memory = container_memory
        # BUG FIX: keep the "path://" scheme on the location. Stripping it here
        # made the downstream check in ServiceStack ('"path://" in
        # props.container_location') dead code, so build-from-source locations
        # were silently treated as docker registry references. The consumer is
        # responsible for removing the prefix when building from source.
        self.container_location = container_location

        # Normalize the optional collections so consumers can always iterate
        # without a None check.
        self.container_env_vars = (
            container_env_vars if container_env_vars is not None else {}
        )
        self.container_secrets = (
            container_secrets if container_secrets is not None else []
        )
        self.container_volumes = (
            container_volumes if container_volumes is not None else []
        )

        self.auto_scale_min_capacity = auto_scale_min_capacity
        self.auto_scale_max_capacity = auto_scale_max_capacity
        self.container_command = container_command
        self.container_healthcheck = container_healthcheck
# src/service_stack.py
import aws_cdk as cdk
from aws_cdk import Duration as duration
from aws_cdk import aws_certificatemanager as acm
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_ecs as ecs
from aws_cdk import aws_elasticloadbalancingv2 as elbv2
from aws_cdk import aws_iam as iam
from aws_cdk import aws_logs as logs
from aws_cdk import aws_secretsmanager as sm
from aws_cdk import Size as size
from constructs import Construct

from src.service_props import CONTAINER_LOCATION_PATH_ID, ServiceProps

ALB_HTTP_LISTENER_PORT = 80
ALB_HTTPS_LISTENER_PORT = 443


class ServiceStack(cdk.Stack):
    """
    ECS service stack: Fargate task definition and service with Service
    Connect, secrets pulled from AWS Secrets Manager, CPU/memory auto scaling
    and optional service-managed EBS volumes.
    """

    def __init__(
        self,
        scope: Construct,
        construct_id: str,
        vpc: ec2.Vpc,
        cluster: ecs.Cluster,
        props: ServiceProps,
        **kwargs,
    ) -> None:
        """
        Args:
            vpc: VPC the service's security group is created in.
            cluster: ECS cluster (with a Service Connect namespace) to run on.
            props: container, secret, volume and scaling configuration.
        """
        super().__init__(scope, construct_id, **kwargs)

        # Task role: default ECS task access plus S3 access.
        # NOTE(review): AmazonS3FullAccess is very broad -- consider scoping
        # this down to the buckets the application actually uses.
        task_role = iam.Role(
            self,
            "TaskRole",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess"),
            ],
        )
        # CloudWatch Logs permissions plus the ssmmessages channels needed by
        # ECS Exec (enable_execute_command=True below).
        task_role.add_to_policy(
            iam.PolicyStatement(
                actions=[
                    "logs:CreateLogStream",
                    "logs:DescribeLogGroups",
                    "logs:DescribeLogStreams",
                    "logs:PutLogEvents",
                    "ssmmessages:CreateControlChannel",
                    "ssmmessages:CreateDataChannel",
                    "ssmmessages:OpenControlChannel",
                    "ssmmessages:OpenDataChannel",
                ],
                resources=["*"],
                effect=iam.Effect.ALLOW,
            )
        )

        # ECS task with Fargate.
        self.task_definition = ecs.FargateTaskDefinition(
            self,
            "TaskDef",
            cpu=1024,
            memory_limit_mib=4096,
            task_role=task_role,
        )

        # Build the image from local source for "path://" locations, otherwise
        # treat the location as a docker registry reference. (Previously
        # from_registry() was constructed unconditionally and the path check
        # used a substring match; startswith matches the scheme explicitly.)
        if props.container_location.startswith(CONTAINER_LOCATION_PATH_ID):
            image = ecs.ContainerImage.from_asset(
                props.container_location.removeprefix(CONTAINER_LOCATION_PATH_ID)
            )
        else:
            image = ecs.ContainerImage.from_registry(props.container_location)

        # BUG FIX: return annotation was sm.Secret, but the value returned is
        # an ecs.Secret (container-level secret wrapper).
        def _get_secret(scope: Construct, id: str, name: str) -> ecs.Secret:
            """Wrap a named Secrets Manager secret as an ECS container secret."""
            isecret = sm.Secret.from_secret_name_v2(scope, id, name)
            return ecs.Secret.from_secrets_manager(isecret)

        # Map each configured secret to the environment variable it populates.
        secrets = {
            secret.environment_key: _get_secret(
                self, f"sm-secrets-{secret.environment_key}", secret.secret_name
            )
            for secret in props.container_secrets
        }

        self.container = self.task_definition.add_container(
            props.container_name,
            image=image,
            memory_limit_mib=props.container_memory,
            environment=props.container_env_vars,
            secrets=secrets,
            port_mappings=[
                ecs.PortMapping(
                    name=props.container_name,
                    container_port=props.container_port,
                    protocol=ecs.Protocol.TCP,
                )
            ],
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix=construct_id,
                log_retention=logs.RetentionDays.FOUR_MONTHS,
            ),
            command=props.container_command,
            health_check=props.container_healthcheck,
        )

        # NOTE(review): this opens the container port to the entire internet;
        # if the service is only reached through the ALB / Service Connect,
        # restrict the peer to the ALB's security group instead.
        self.security_group = ec2.SecurityGroup(self, "SecurityGroup", vpc=vpc)
        self.security_group.add_ingress_rule(
            peer=ec2.Peer.ipv4("0.0.0.0/0"),
            connection=ec2.Port.tcp(props.container_port),
        )

        # Attach the ECS task to the ECS cluster.
        self.service = ecs.FargateService(
            self,
            "Service",
            cluster=cluster,
            task_definition=self.task_definition,
            enable_execute_command=True,
            circuit_breaker=ecs.DeploymentCircuitBreaker(enable=True, rollback=True),
            security_groups=[self.security_group],
            service_connect_configuration=ecs.ServiceConnectProps(
                log_driver=ecs.LogDrivers.aws_logs(stream_prefix=construct_id),
                services=[
                    ecs.ServiceConnectService(
                        port_mapping_name=props.container_name,
                        port=props.container_port,
                        dns_name=props.container_name,
                    )
                ],
            ),
        )

        # Auto scaling on CPU and memory utilization.
        scaling = self.service.auto_scale_task_count(
            min_capacity=props.auto_scale_min_capacity,
            max_capacity=props.auto_scale_max_capacity,
        )
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
        )
        scaling.scale_on_memory_utilization(
            "MemoryScaling",
            target_utilization_percent=50,
        )

        # Mount service-managed EBS volumes.
        # BUG FIX: the construct id ("ContainerVolume") and the volume name
        # were constant across loop iterations, so configuring more than one
        # ContainerVolume raised a duplicate-construct-id error and produced
        # clashing volume names. The first volume keeps the original id/name
        # so existing single-volume deployments are not replaced.
        for index, container_volume in enumerate(props.container_volumes):
            suffix = "" if index == 0 else str(index)
            volume_name = f"{props.container_name}{suffix}"
            service_volume = ecs.ServiceManagedVolume(
                self,
                f"ContainerVolume{suffix}",
                name=volume_name,
                managed_ebs_volume=ecs.ServiceManagedEBSVolumeConfiguration(
                    size=size.gibibytes(container_volume.size),
                    volume_type=ec2.EbsDeviceVolumeType.GP3,
                ),
            )

            self.task_definition.add_volume(
                name=volume_name, configured_at_launch=True
            )
            self.service.add_volume(service_volume)

            service_volume.mount_in(
                self.container,
                container_path=container_volume.path,
                read_only=container_volume.read_only,
            )


class LoadBalancedServiceStack(ServiceStack):
    """
    A special stack to create an ECS service fronted by a load balancer. This allows us to split up
    the ECS services and the load balancer into separate stacks. It makes maintaining the stacks
    easier. Unfortunately, due to the way AWS works, setting up a load balancer and ECS service
    in different stacks may cause cyclic references.
    https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_ecs/README.html#using-a-load-balancer-from-a-different-stack

    To work around this problem we use the "Split at listener" option from
    https://github.com/aws-samples/aws-cdk-examples
    """

    def __init__(
        self,
        scope: Construct,
        construct_id: str,
        vpc: ec2.Vpc,
        cluster: ecs.Cluster,
        props: ServiceProps,
        load_balancer: elbv2.ApplicationLoadBalancer,
        certificate_arn: str,
        health_check_path: str = "/",
        health_check_interval: int = 1,  # minutes; max is 5
        **kwargs,
    ) -> None:
        """
        Args:
            load_balancer: ALB (from LoadBalancerStack) to attach listeners to.
            certificate_arn: ARN of an existing ACM certificate for HTTPS.
            health_check_path: URL path the target-group health check probes.
            health_check_interval: minutes between health checks (max is 5).
        """
        super().__init__(scope, construct_id, vpc, cluster, props, **kwargs)

        # -------------------
        # ACM Certificate for HTTPS
        # -------------------
        self.cert = acm.Certificate.from_certificate_arn(
            self, "Cert", certificate_arn=certificate_arn
        )

        # -------------------------------
        # HTTPS listener forwarding to the ECS service
        # -------------------------------
        https_listener = elbv2.ApplicationListener(
            self,
            "HttpsListener",
            load_balancer=load_balancer,
            port=ALB_HTTPS_LISTENER_PORT,
            open=True,
            protocol=elbv2.ApplicationProtocol.HTTPS,
            certificates=[self.cert],
        )

        # TLS terminates at the ALB; traffic to the container is plain HTTP.
        https_listener.add_targets(
            "HttpsTarget",
            port=props.container_port,
            protocol=elbv2.ApplicationProtocol.HTTP,
            targets=[self.service],
            health_check=elbv2.HealthCheck(
                path=health_check_path, interval=duration.minutes(health_check_interval)
            ),
        )

        # -------------------------------
        # redirect http to https
        # -------------------------------
        http_listener = elbv2.ApplicationListener(
            self,
            "HttpListener",
            load_balancer=load_balancer,
            port=ALB_HTTP_LISTENER_PORT,
            open=True,
            protocol=elbv2.ApplicationProtocol.HTTP,
        )

        http_listener.add_action(
            "HttpRedirect",
            action=elbv2.ListenerAction.redirect(
                port=str(ALB_HTTPS_LISTENER_PORT),
                protocol=(elbv2.ApplicationProtocol.HTTPS).value,
                permanent=True,
            ),
        )
a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/test_network_stack.py b/tests/unit/test_network_stack.py new file mode 100644 index 0000000..ba6ea66 --- /dev/null +++ b/tests/unit/test_network_stack.py @@ -0,0 +1,12 @@ +import aws_cdk as core +import aws_cdk.assertions as assertions + +from src.network_stack import NetworkStack + + +def test_vpc_created(): + app = core.App() + vpc_cidr = "10.254.192.0/24" + network = NetworkStack(app, "NetworkStack", vpc_cidr) + template = assertions.Template.from_stack(network) + template.has_resource_properties("AWS::EC2::VPC", {"CidrBlock": vpc_cidr}) diff --git a/tests/unit/test_service_stack.py b/tests/unit/test_service_stack.py new file mode 100644 index 0000000..1cbbf63 --- /dev/null +++ b/tests/unit/test_service_stack.py @@ -0,0 +1,56 @@ +import aws_cdk as cdk +import aws_cdk.assertions as assertions + +from src.network_stack import NetworkStack +from src.ecs_stack import EcsStack +from src.service_props import ServiceProps, ServiceSecret, ContainerVolume +from src.service_stack import ServiceStack + + +def test_service_stack_created(): + cdk_app = cdk.App() + vpc_cidr = "10.254.192.0/24" + network_stack = NetworkStack(cdk_app, "NetworkStack", vpc_cidr=vpc_cidr) + ecs_stack = EcsStack( + cdk_app, "EcsStack", vpc=network_stack.vpc, namespace="dev.app.io" + ) + + app_props = ServiceProps( + container_name="app", + container_location="ghcr.io/sage-bionetworks/app:1.0", + container_port=8010, + container_memory=200, + container_secrets=[ + ServiceSecret( + secret_name="/app/secret", + environment_key="APP_SECRET", + ) + ], + container_volumes=[ContainerVolume(path="/work")], + container_command=["test"], + container_healthcheck=cdk.aws_ecs.HealthCheck(command=["CMD", "/healthcheck"]), + ) + app_stack = ServiceStack( + scope=cdk_app, + construct_id="app", + vpc=network_stack.vpc, + cluster=ecs_stack.cluster, + props=app_props, + ) + + template = 
assertions.Template.from_stack(app_stack) + template.has_resource_properties( + "AWS::ECS::TaskDefinition", + { + "ContainerDefinitions": [ + { + "Image": "ghcr.io/sage-bionetworks/app:1.0", + "Memory": 200, + "MountPoints": [{"ContainerPath": "/work"}], + "Secrets": [{"Name": "APP_SECRET"}], + "Command": ["test"], + "HealthCheck": {"Command": ["CMD", "/healthcheck"]}, + } + ] + }, + ) diff --git a/tools/setup.sh b/tools/setup.sh new file mode 100755 index 0000000..f92cf9b --- /dev/null +++ b/tools/setup.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# Safer bash scripts +set -euxo pipefail + +# Install Node.js dependencies +npm install -g aws-cdk@2.151.0 + +# Install Python dependencies +python -m venv .venv +source .venv/bin/activate +pip install --upgrade pip +pip install -r requirements.txt -r requirements-dev.txt + +# Install git hooks +git config --global --add safe.directory "$PWD" +pre-commit install --install-hooks