From 5ed1b538180989f8e37489a5b4da805fb2ef8fd6 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Tue, 26 Sep 2017 21:14:40 -0400 Subject: [PATCH 01/20] re-allow disabling elasticsearch This reverts commit 806bba7e6a0eab3df38f5c468a3e460225611081. --- stack/common.py | 2 ++ stack/search.py | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/stack/common.py b/stack/common.py index 8440510..31c3651 100644 --- a/stack/common.py +++ b/stack/common.py @@ -2,6 +2,8 @@ from .template import template +dont_create_value = "(none)" + arn_prefix = Ref(template.add_parameter(Parameter( "ArnPrefix", Description="The prefix to use for Amazon Resource Names (ARNs).", diff --git a/stack/search.py b/stack/search.py index 9a033bc..6f7b79a 100644 --- a/stack/search.py +++ b/stack/search.py @@ -1,13 +1,14 @@ import os from awacs.aws import Action, Allow, Policy, Principal, Statement -from troposphere import GetAtt, Output, Parameter, Ref +from troposphere import Equals, GetAtt, Not, Output, Parameter, Ref from troposphere.elasticsearch import ( Domain, EBSOptions, ElasticsearchClusterConfig ) +from .common import dont_create_value from .template import template # TODO: clean up naming for this role so it's the same for all configurations @@ -18,12 +19,13 @@ es_instance_type = template.add_parameter(Parameter( "ElasticsearchInstanceType", - Default='t2.small.elasticsearch', + Default=dont_create_value, Description="Elasticsearch instance type. 
Note: not all types are supported in all regions; see: " "http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/" "aes-supported-instance-types.html", Type="String", AllowedValues=[ + dont_create_value, 't2.micro.elasticsearch', 't2.small.elasticsearch', 't2.medium.elasticsearch', @@ -82,6 +84,9 @@ Type="Number", )) +es_condition = "Elasticsearch" +template.add_condition(es_condition, Not(Equals(Ref(es_instance_type), dont_create_value))) + # Create an Elasticsearch domain es_domain = template.add_resource( @@ -96,6 +101,7 @@ ), ] ), + Condition=es_condition, EBSOptions=EBSOptions( EBSEnabled=True, VolumeSize=Ref(es_volume_size), From 5b0495615e9f1c0470b5ed11a8311901991fe945 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Tue, 26 Sep 2017 21:21:49 -0400 Subject: [PATCH 02/20] don't include values for Elasticsearch environment variables if no Elasticsearch domain was created --- stack/environment.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/stack/environment.py b/stack/environment.py index b4dc0bb..695c942 100644 --- a/stack/environment.py +++ b/stack/environment.py @@ -11,7 +11,7 @@ if os.environ.get('USE_GOVCLOUD') != 'on': # not supported by GovCloud, so add it only if it was created (and in this # case we want to avoid importing if it's not needed) - from .search import es_domain + from .search import es_condition, es_domain else: es_domain = None @@ -58,8 +58,8 @@ if es_domain: # not supported by GovCloud, so add it only if it was created environment_variables += [ - ("ELASTICSEARCH_ENDPOINT", GetAtt(es_domain, "DomainEndpoint")), - ("ELASTICSEARCH_PORT", "443"), - ("ELASTICSEARCH_USE_SSL", "on"), - ("ELASTICSEARCH_VERIFY_CERTS", "on"), + ("ELASTICSEARCH_ENDPOINT", If(es_condition, GetAtt(es_domain, "DomainEndpoint"), "")), + ("ELASTICSEARCH_PORT", If(es_condition, "443", "")), + ("ELASTICSEARCH_USE_SSL", If(es_condition, "on", "")), + ("ELASTICSEARCH_VERIFY_CERTS", If(es_condition, "on", "")), ] From 
405641603288799e07690d03949b91869296ed2e Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Tue, 26 Sep 2017 21:31:41 -0400 Subject: [PATCH 03/20] don't create Elasticsearch outputs if no Elasticsearch domain was created --- stack/search.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack/search.py b/stack/search.py index 6f7b79a..a7f2e67 100644 --- a/stack/search.py +++ b/stack/search.py @@ -119,10 +119,12 @@ "ElasticsearchDomainEndpoint", Description="Elasticsearch domain endpoint", Value=GetAtt(es_domain, "DomainEndpoint"), + Condition=es_condition, )) template.add_output(Output( "ElasticsearchDomainArn", Description="Elasticsearch domain ARN", Value=GetAtt(es_domain, "DomainArn"), + Condition=es_condition, )) From 437a6bb357186cd8e951f94d2c3f7c7339f2c1d9 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Tue, 26 Sep 2017 21:31:59 -0400 Subject: [PATCH 04/20] allow disabling Cache and Database, if desired --- stack/cache.py | 11 ++++++++++- stack/database.py | 9 ++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/stack/cache.py b/stack/cache.py index 0971de0..edc2b69 100644 --- a/stack/cache.py +++ b/stack/cache.py @@ -1,5 +1,6 @@ -from troposphere import Equals, If, Parameter, Ref, ec2, elasticache +from troposphere import Equals, If, Not, Parameter, Ref, ec2, elasticache +from .common import dont_create_value from .template import template from .vpc import ( container_a_subnet, @@ -28,6 +29,7 @@ Description="Cache instance class", Type="String", AllowedValues=[ + dont_create_value, 'cache.t2.micro', 'cache.t2.small', 'cache.t2.medium', @@ -50,6 +52,10 @@ )) +cache_condition = "CacheCondition" +template.add_condition(cache_condition, Not(Equals(Ref(cache_node_type), dont_create_value))) + + using_redis_condition = "UsingRedis" template.add_condition( using_redis_condition, @@ -61,6 +67,7 @@ 'CacheSecurityGroup', template=template, GroupDescription="Cache security group.", + Condition=cache_condition, VpcId=Ref(vpc), 
SecurityGroupIngress=[ # Redis in from web clusters @@ -84,6 +91,7 @@ "CacheSubnetGroup", template=template, Description="Subnets available for the cache instance", + Condition=cache_condition, SubnetIds=[Ref(container_a_subnet), Ref(container_b_subnet)], ) @@ -93,6 +101,7 @@ template=template, Engine=Ref(cache_engine), CacheNodeType=Ref(cache_node_type), + Condition=cache_condition, NumCacheNodes=1, # Must be 1 for redis, but still required Port=If(using_redis_condition, 6379, 11211), VpcSecurityGroupIds=[Ref(cache_security_group)], diff --git a/stack/database.py b/stack/database.py index c17dc9e..8a63091 100644 --- a/stack/database.py +++ b/stack/database.py @@ -1,5 +1,6 @@ -from troposphere import Parameter, Ref, ec2, rds +from troposphere import Equals, Not, Parameter, Ref, ec2, rds +from .common import dont_create_value from .template import template from .vpc import ( container_a_subnet, @@ -57,6 +58,7 @@ Description="Database instance class", Type="String", AllowedValues=[ + dont_create_value, 'db.t1.micro', 'db.m1.small', 'db.m4.large', @@ -142,11 +144,14 @@ ], )) +db_condition = "DatabaseCondition" +template.add_condition(db_condition, Not(Equals(Ref(db_class), dont_create_value))) db_security_group = ec2.SecurityGroup( 'DatabaseSecurityGroup', template=template, GroupDescription="Database security group.", + Condition=db_condition, VpcId=Ref(vpc), SecurityGroupIngress=[ # Postgres in from web clusters @@ -169,6 +174,7 @@ db_subnet_group = rds.DBSubnetGroup( "DatabaseSubnetGroup", template=template, + Condition=db_condition, DBSubnetGroupDescription="Subnets available for the RDS DB Instance", SubnetIds=[Ref(container_a_subnet), Ref(container_b_subnet)], ) @@ -178,6 +184,7 @@ "PostgreSQL", template=template, DBName=Ref(db_name), + Condition=db_condition, AllocatedStorage=Ref(db_allocated_storage), DBInstanceClass=Ref(db_class), Engine="postgres", From a51d94f08b6d7f591f31869fe5a07232d67da018 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Tue, 26 Sep 
2017 21:47:46 -0400 Subject: [PATCH 05/20] don't attempt to add values for DATABASE_URL and CACHE_URL if none was created --- stack/environment.py | 69 ++++++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 28 deletions(-) diff --git a/stack/environment.py b/stack/environment.py index 695c942..8d35944 100644 --- a/stack/environment.py +++ b/stack/environment.py @@ -3,9 +3,14 @@ from troposphere import AWS_REGION, GetAtt, If, Join, Ref from .assets import assets_bucket, distribution, private_assets_bucket -from .cache import cache_cluster, cache_engine, using_redis_condition +from .cache import ( + cache_cluster, + cache_condition, + cache_engine, + using_redis_condition +) from .common import secret_key -from .database import db_instance, db_name, db_password, db_user +from .database import db_condition, db_instance, db_name, db_password, db_user from .domain import domain_name, domain_name_alternates if os.environ.get('USE_GOVCLOUD') != 'on': @@ -22,31 +27,39 @@ ("DOMAIN_NAME", domain_name), ("ALTERNATE_DOMAIN_NAMES", Join(',', domain_name_alternates)), ("SECRET_KEY", secret_key), - ("DATABASE_URL", Join("", [ - "postgres://", - Ref(db_user), - ":", - Ref(db_password), - "@", - GetAtt(db_instance, 'Endpoint.Address'), - "/", - Ref(db_name), - ])), - ("CACHE_URL", Join("", [ - Ref(cache_engine), - "://", - If( - using_redis_condition, - GetAtt(cache_cluster, 'RedisEndpoint.Address'), - GetAtt(cache_cluster, 'ConfigurationEndpoint.Address') - ), - ":", - If( - using_redis_condition, - GetAtt(cache_cluster, 'RedisEndpoint.Port'), - GetAtt(cache_cluster, 'ConfigurationEndpoint.Port') - ), - ])), + ("DATABASE_URL", If( + db_condition, + Join("", [ + "postgres://", + Ref(db_user), + ":", + Ref(db_password), + "@", + GetAtt(db_instance, 'Endpoint.Address'), + "/", + Ref(db_name), + ]), + "none-created", + )), + ("CACHE_URL", If( + cache_condition, + Join("", [ + Ref(cache_engine), + "://", + If( + using_redis_condition, + GetAtt(cache_cluster, 
'RedisEndpoint.Address'), + GetAtt(cache_cluster, 'ConfigurationEndpoint.Address') + ), + ":", + If( + using_redis_condition, + GetAtt(cache_cluster, 'RedisEndpoint.Port'), + GetAtt(cache_cluster, 'ConfigurationEndpoint.Port') + ), + ]), + "none-created", + )), ] if distribution: @@ -58,7 +71,7 @@ if es_domain: # not supported by GovCloud, so add it only if it was created environment_variables += [ - ("ELASTICSEARCH_ENDPOINT", If(es_condition, GetAtt(es_domain, "DomainEndpoint"), "")), + ("ELASTICSEARCH_ENDPOINT", If(es_condition, GetAtt(es_domain, "DomainEndpoint"), "none-created")), ("ELASTICSEARCH_PORT", If(es_condition, "443", "")), ("ELASTICSEARCH_USE_SSL", If(es_condition, "on", "")), ("ELASTICSEARCH_VERIFY_CERTS", If(es_condition, "on", "")), From f7f04537279bc8563ab84eabe7fb2cea06b9c5c6 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Mon, 18 Sep 2017 20:12:52 -0400 Subject: [PATCH 06/20] add support for using Dokku as the application server --- Makefile | 2 + README.rst | 112 ++++++++++++++++++++++------- stack/__init__.py | 2 + stack/dokku.py | 180 ++++++++++++++++++++++++++++++++++++++++++++++ stack/vpc.py | 63 ++++++++-------- 5 files changed, 302 insertions(+), 57 deletions(-) create mode 100644 stack/dokku.py diff --git a/Makefile b/Makefile index 6137809..3dcc46b 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,8 @@ templates: USE_EB=on USE_NAT_GATEWAY=on python -c 'import stack' > content/eb-nat.json USE_ECS=on python -c 'import stack' > content/ecs-no-nat.json USE_ECS=on USE_NAT_GATEWAY=on python -c 'import stack' > content/ecs-nat.json + USE_DOKKU=on python -c 'import stack' > content/dokku-no-nat.json + # USE_DOKKU=on USE_NAT_GATEWAY=on python -c 'import stack' > content/dokku-nat.json (disabled; need to SSH to instance to deploy) USE_GOVCLOUD=on python -c 'import stack' > content/gc-no-nat.json USE_GOVCLOUD=on USE_NAT_GATEWAY=on python -c 'import stack' > content/gc-nat.json cd content/; mkdir -p `cat ../version.txt`; for file in `ls 
*nat.json`; do cp $$file `cat ../version.txt`/`echo $$file|cut -d'.' -f1`-`cat ../version.txt`.json; done diff --git a/README.rst b/README.rst index 5a5454a..62a355f 100644 --- a/README.rst +++ b/README.rst @@ -17,24 +17,24 @@ If a NAT gateway is not used, it's possible to create a fully-managed, self-cont environment for your application entirely within the free tier on AWS. To try it out, select one of the following: -+---------------------+-------------------+---------------------------+---------------+ -| | Elastic Beanstalk | Elastic Container Service | EC2 Instances | -+=====================+===================+===========================+===============+ -| Without NAT Gateway | |EB-No-NAT|_ | |ECS-No-NAT|_ | |EC2-No-NAT|_ | -+---------------------+-------------------+---------------------------+---------------+ -| With NAT Gateway | |EB-NAT|_ | |ECS-NAT|_ | |EC2-NAT|_ | -+---------------------+-------------------+---------------------------+---------------+ ++---------------------+-------------------+---------------------------+---------------+-----------------+ +| | Elastic Beanstalk | Elastic Container Service | EC2 Instances | Dokku | ++=====================+===================+===========================+===============+=================+ +| Without NAT Gateway | |EB-No-NAT|_ | |ECS-No-NAT|_ | |EC2-No-NAT|_ | |Dokku-No-NAT|_ | ++---------------------+-------------------+---------------------------+---------------+-----------------+ +| With NAT Gateway | |EB-NAT|_ | |ECS-NAT|_ | |EC2-NAT|_ | n/a | ++---------------------+-------------------+---------------------------+---------------+-----------------+ If you'd like to review the CloudFormation template first, or update an existing stack, you may also wish to use the JSON template directly: -+---------------------+-------------------+---------------------------+--------------------+ -| | Elastic Beanstalk | Elastic Container Service | EC2 Instances | 
-+=====================+===================+===========================+====================+ -| Without NAT Gateway | `eb-no-nat.json`_ | `ecs-no-nat.json`_ | `ec2-no-nat.json`_ | -+---------------------+-------------------+---------------------------+--------------------+ -| With NAT Gateway | `eb-nat.json`_ | `ecs-nat.json`_ | `ec2-nat.json`_ | -+---------------------+-------------------+---------------------------+--------------------+ ++---------------------+-------------------+---------------------------+--------------------+----------------------+ +| | Elastic Beanstalk | Elastic Container Service | EC2 Instances | Dokku | ++=====================+===================+===========================+====================+======================+ +| Without NAT Gateway | `eb-no-nat.json`_ | `ecs-no-nat.json`_ | `ec2-no-nat.json`_ | `dokku-no-nat.json`_ | ++---------------------+-------------------+---------------------------+--------------------+----------------------+ +| With NAT Gateway | `eb-nat.json`_ | `ecs-nat.json`_ | `ec2-nat.json`_ | n/a | ++---------------------+-------------------+---------------------------+--------------------+----------------------+ .. |EB-No-NAT| image:: https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png .. _EB-No-NAT: https://console.aws.amazon.com/cloudformation/home?#/stacks/new?stackName=eb-app-no-nat&templateURL=https://s3.amazonaws.com/aws-container-basics/eb-no-nat.json @@ -60,18 +60,32 @@ wish to use the JSON template directly: .. _EC2-NAT: https://console.aws.amazon.com/cloudformation/home?#/stacks/new?stackName=ec2-app-with-nat&templateURL=https://s3.amazonaws.com/aws-container-basics/ec2-nat.json .. _ec2-nat.json: https://s3.amazonaws.com/aws-container-basics/ec2-nat.json -Elastic Beanstalk, Elastic Container Service, or EC2 instances? ---------------------------------------------------------------- +.. 
|Dokku-No-NAT| image:: https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png +.. _Dokku-No-NAT: https://console.aws.amazon.com/cloudformation/home?#/stacks/new?stackName=dokku-no-nat&templateURL=https://s3.amazonaws.com/aws-container-basics/dokku-no-nat.json +.. _dokku-no-nat.json: https://s3.amazonaws.com/aws-container-basics/dokku-no-nat.json -Elastic Beanstalk is the recommended starting point, unless more complex container service -definitions are required or you prefer to configure application servers manually using Ansible, -Salt, Chef, Puppet, or another such tool. Elastic Beanstalk comes with a preconfigured autoscaling -configuration, allows for automated, managed updates to the underlying servers, allows changing + +Elastic Beanstalk, Elastic Container Service, EC2, or Dokku? +------------------------------------------------------------ + +**Elastic Beanstalk** is the recommended starting point. Elastic Beanstalk comes with a preconfigured +autoscaling configuration, allows for automated, managed updates to the underlying servers, allows changing environment variables without recreating the underlying service, and comes with its own command line tool for managing deployments. The Elastic Beanstalk environment uses the `multicontainer docker environment `_ to maximize flexibility in terms of the application(s) and container(s) deployed to the stack. +**Elastic Container Service (ECS)** might be useful if complex container service definitions are required. + +If you prefer to configure application servers manually using Ansible, Salt, Chef, Puppet, or another such tool, +choose the **EC2** option. Be aware that the instances created are managed by an autoscaling group, so you should +suspend the autoscaling processes on this autoscaling group if you don't want it to bring up new (unprovisioned) +instances. + +For very simple, Heroku-like deploys, choose the **Dokku** option. 
This will give you a single EC2 instance +based on Ubuntu 16.04 LTS with `Dokku `_ pre-installed and global environment +variables configured that will allow your app to find the Postgres, Redis or Memcached, and Elasticsearch nodes +created with this stack. NAT Gateways ------------ @@ -91,10 +105,11 @@ instance, have been created. SSL Certificate --------------- -The automatically-generated SSL certificate requires approval from the domain owner. After -initiating stack creation, be on the lookout for an email from Amazon to the domain owner -(as seen in a ``whois`` query) and follow the link to approve the certificate. If you're using -a ``.io`` domain, be aware that +For the Elastic Beanstalk, Elastic Container Service, and EC2 (non-GovCloud) options, an +automatically-generated SSL certificate is included. The certificate requires approval from the +domain owner before it can be issued, and *your stack creation will not finish until you approve +the request*. Be on the lookout for an email from Amazon to the domain owner (as seen in a ``whois`` +query) and follow the link to approve the certificate. If you're using a ``.io`` domain, be aware that `prior steps `_ may be necessary to receive email for ``.io`` domains, because domain owner emails cannot be discovered via ``whois``. @@ -282,11 +297,58 @@ Once complete, the EB environment should be running a copy of your container. To issues with the deployment, review events and logs via the Elastic Beanstack section of the AWS console. -Good luck! +Dokku +----- + +DNS +~~~ + +After the stack is created, you'll want to inspect the Outputs for the PublicIP of the instance and +create a DNS ``A`` record (possibly including a wildcard record, if you're using vhost-based apps) +for your chosen domain. + +For help creating a DNS record, please refer to the `Dokku DNS documentation +`_. + +Deployment +~~~~~~~~~~ + +Dokku may take 5-10 minutes to install, even after the stack creation has finished. 
Once installation +is complete, create a new app on the remote server:: + + ssh dokku@ apps:create python-sample + +and deploy Heroku's Python sample to that app:: + + git clone https://github.com/heroku/python-sample.git + cd python-sample + git remote add dokku dokku@:python-sample + git push dokku master + +You should be able to watch the build complete in the output from the ``git push`` command. If the +deploy completes successfully, you should be able to see "Hello world!" at +http://python-sample.your.domain/ + +For additional help deploying to your new instance, please refer to the `Dokku documentation +`_. + +Let's Encrypt +~~~~~~~~~~~~~ + +The Dokku option does not create a load balancer and hence does not include a free SSL certificate +via Amazon Certificate Manager, so let's create one with Let's Encrypt instance:: + + ssh ubuntu@ sudo dokku plugin:install https://github.com/dokku/dokku-letsencrypt.git + ssh dokku@ config:set --no-restart python-sample DOKKU_LETSENCRYPT_EMAIL=your@email.tld + ssh dokku@ letsencrypt python-sample + +The Python sample app should now be accessible over HTTPS at https://python-sample.your.domain/ Contributing ------------ Please read `contributing guidelines here `_. +Good luck and have fun! + Copyright 2017 Jean-Phillipe Serafin, Tobias McNulty. diff --git a/stack/__init__.py b/stack/__init__.py index f423afd..ed4954e 100644 --- a/stack/__init__.py +++ b/stack/__init__.py @@ -18,6 +18,8 @@ elif os.environ.get('USE_EB') == 'on': from . import repository # noqa: F401 from . import eb # noqa: F401 +elif os.environ.get('USE_DOKKU') == 'on': + from . import dokku # noqa: F401 else: # USE_GOVCLOUD and USE_EC2 both provide EC2 instances from . 
import instances # noqa: F401 diff --git a/stack/dokku.py b/stack/dokku.py new file mode 100644 index 0000000..c372287 --- /dev/null +++ b/stack/dokku.py @@ -0,0 +1,180 @@ +import troposphere.ec2 as ec2 +import troposphere.iam as iam +from troposphere import Base64, FindInMap, Join, Output, Parameter, Ref, Tags + +from .assets import assets_management_policy +from .common import container_instance_type +from .domain import domain_name +from .environment import environment_variables +from .logs import logging_policy +from .template import template +from .vpc import container_a_subnet, vpc + +key_name = template.add_parameter(Parameter( + "KeyName", + Description="Name of an existing EC2 KeyPair to enable SSH access to " + "the AWS EC2 instances", + Type="AWS::EC2::KeyPair::KeyName", + ConstraintDescription="must be the name of an existing EC2 KeyPair." +)) + +dokku_version = template.add_parameter(Parameter( + "DokkuVersion", + Description="Dokku version to install, e.g., \"v0.10.4\" (see https://github.com/dokku/dokku/releases).", + Type="String", + Default="v0.10.4", +)) + +dokku_web_config = template.add_parameter(Parameter( + "DokkuWebConfig", + Description="Whether or not to enable the Dokku web config (defaults to false for security reasons).", + Type="String", + AllowedValues=["true", "false"], + Default="false", +)) + +dokku_vhost_enable = template.add_parameter(Parameter( + "DokkuVhostEnable", + Description="Whether or not to use vhost-based deployments (e.g., foo.domain.name).", + Type="String", + AllowedValues=["true", "false"], + Default="true", +)) + +root_size = template.add_parameter(Parameter( + "RootVolumeSize", + Description="The size of the root volume (in GB).", + Type="Number", + Default="30", +)) + +ssh_cidr = template.add_parameter(Parameter( + "SshCidr", + Description="CIDR block from which to allow SSH access. 
Restrict this to your IP, if possible.", + Type="String", + Default="0.0.0.0/0", +)) + +# "16.04 hvm ssd" AMIs from https://cloud-images.ubuntu.com/locator/ec2/ +template.add_mapping('RegionMap', { + "ap-northeast-1": {"AMI": "ami-0417e362"}, + "ap-northeast-2": {"AMI": "ami-536ab33d"}, + "ap-south-1": {"AMI": "ami-df413bb0"}, + "ap-southeast-1": {"AMI": "ami-9f28b3fc"}, + "ap-southeast-2": {"AMI": "ami-bb1901d8"}, + "ca-central-1": {"AMI": "ami-a9c27ccd"}, + "eu-central-1": {"AMI": "ami-958128fa"}, + "eu-west-1": {"AMI": "ami-674cbc1e"}, + "eu-west-2": {"AMI": "ami-03998867"}, + "sa-east-1": {"AMI": "ami-a41869c8"}, + "us-east-1": {"AMI": "ami-1d4e7a66"}, + "us-east-2": {"AMI": "ami-dbbd9dbe"}, + "us-west-1": {"AMI": "ami-969ab1f6"}, + "us-west-2": {"AMI": "ami-8803e0f0"}, +}) + +# EC2 instance role +instance_role = iam.Role( + "ContainerInstanceRole", + template=template, + AssumeRolePolicyDocument=dict(Statement=[dict( + Effect="Allow", + Principal=dict(Service=["ec2.amazonaws.com"]), + Action=["sts:AssumeRole"], + )]), + Path="/", + Policies=[ + assets_management_policy, + logging_policy, + ] +) + +# EC2 instance profile +instance_profile = iam.InstanceProfile( + "ContainerInstanceProfile", + template=template, + Path="/", + Roles=[Ref(instance_role)], +) + +# EC2 security group +security_group = template.add_resource(ec2.SecurityGroup( + 'SecurityGroup', + GroupDescription='Allows SSH access from SshCidr and HTTP/HTTPS access from anywhere.', + VpcId=Ref(vpc), + SecurityGroupIngress=[ + ec2.SecurityGroupRule( + IpProtocol='tcp', + FromPort=22, + ToPort=22, + CidrIp=Ref(ssh_cidr), + ), + ec2.SecurityGroupRule( + IpProtocol='tcp', + FromPort=80, + ToPort=80, + CidrIp='0.0.0.0/0', + ), + ec2.SecurityGroupRule( + IpProtocol='tcp', + FromPort=443, + ToPort=443, + CidrIp='0.0.0.0/0', + ), + ] +)) + +# Elastic IP for EC2 instance +eip = template.add_resource(ec2.EIP("Eip")) + +# The Dokku EC2 instance +ec2_instance = template.add_resource(ec2.Instance( + 
'Ec2Instance', + ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), + InstanceType=container_instance_type, + KeyName=Ref(key_name), + SecurityGroupIds=[Ref(security_group)], + IamInstanceProfile=Ref(instance_profile), + SubnetId=Ref(container_a_subnet), + BlockDeviceMappings=[ + ec2.BlockDeviceMapping( + DeviceName="/dev/sda1", + Ebs=ec2.EBSBlockDevice( + VolumeSize=Ref(root_size), + ) + ), + ], + UserData=Base64(Join('', [ + '#!/bin/bash\n', + 'sudo apt-get update\n', + 'wget https://raw.githubusercontent.com/dokku/dokku/', Ref(dokku_version), '/bootstrap.sh\n', + 'sudo', + ' DOKKU_TAG=', Ref(dokku_version), + ' DOKKU_VHOST_ENABLE=', Ref(dokku_vhost_enable), + ' DOKKU_WEB_CONFIG=', Ref(dokku_web_config), + ' DOKKU_HOSTNAME=', domain_name, + ' DOKKU_KEY_FILE=/home/ubuntu/.ssh/authorized_keys', # use the key configured by key_name + ' bash bootstrap.sh', + '\n', + 'sudo -u dokku dokku config:set --global' + ] + [' %s=%s' % env_pair for env_pair in environment_variables] + ['\n'] + )), + Tags=Tags( + Name=Ref("AWS::StackName"), + ), +)) + +# Associate the Elastic IP separately, so it doesn't change when the instance changes. 
+eip_assoc = template.add_resource(ec2.EIPAssociation( + "EipAssociation", + InstanceId=Ref(ec2_instance), + EIP=Ref(eip), +)) + +template.add_output([ + Output( + "PublicIP", + Description="Public IP address of Elastic IP associated with the Dokku instance", + Value=Ref(eip), + ), +]) diff --git a/stack/vpc.py b/stack/vpc.py index 88efee6..8ffc633 100644 --- a/stack/vpc.py +++ b/stack/vpc.py @@ -16,6 +16,7 @@ from .template import template USE_NAT_GATEWAY = os.environ.get('USE_NAT_GATEWAY') == 'on' +USE_DOKKU = os.environ.get('USE_DOKKU') == 'on' primary_az = template.add_parameter(Parameter( @@ -108,41 +109,39 @@ ) -# Holds load balancer -loadbalancer_a_subnet_cidr = "10.0.2.0/24" -loadbalancer_a_subnet = Subnet( - "LoadbalancerASubnet", - template=template, - VpcId=Ref(vpc), - CidrBlock=loadbalancer_a_subnet_cidr, - AvailabilityZone=Ref(primary_az), -) - - -SubnetRouteTableAssociation( - "LoadbalancerASubnetRouteTableAssociation", - template=template, - RouteTableId=Ref(public_route_table), - SubnetId=Ref(loadbalancer_a_subnet), -) - +if not USE_DOKKU: + # Holds load balancer + loadbalancer_a_subnet_cidr = "10.0.2.0/24" + loadbalancer_a_subnet = Subnet( + "LoadbalancerASubnet", + template=template, + VpcId=Ref(vpc), + CidrBlock=loadbalancer_a_subnet_cidr, + AvailabilityZone=Ref(primary_az), + ) -loadbalancer_b_subnet_cidr = "10.0.3.0/24" -loadbalancer_b_subnet = Subnet( - "LoadbalancerBSubnet", - template=template, - VpcId=Ref(vpc), - CidrBlock=loadbalancer_b_subnet_cidr, - AvailabilityZone=Ref(secondary_az), -) + SubnetRouteTableAssociation( + "LoadbalancerASubnetRouteTableAssociation", + template=template, + RouteTableId=Ref(public_route_table), + SubnetId=Ref(loadbalancer_a_subnet), + ) + loadbalancer_b_subnet_cidr = "10.0.3.0/24" + loadbalancer_b_subnet = Subnet( + "LoadbalancerBSubnet", + template=template, + VpcId=Ref(vpc), + CidrBlock=loadbalancer_b_subnet_cidr, + AvailabilityZone=Ref(secondary_az), + ) -SubnetRouteTableAssociation( - 
"LoadbalancerBSubnetRouteTableAssociation", - template=template, - RouteTableId=Ref(public_route_table), - SubnetId=Ref(loadbalancer_b_subnet), -) + SubnetRouteTableAssociation( + "LoadbalancerBSubnetRouteTableAssociation", + template=template, + RouteTableId=Ref(public_route_table), + SubnetId=Ref(loadbalancer_b_subnet), + ) if USE_NAT_GATEWAY: From ae322747ddb02dbdd175589a738d759fe5b3fd0f Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Mon, 18 Sep 2017 22:32:17 -0400 Subject: [PATCH 07/20] correct dokku environment variable configuration --- stack/dokku.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stack/dokku.py b/stack/dokku.py index c372287..be73b5b 100644 --- a/stack/dokku.py +++ b/stack/dokku.py @@ -1,3 +1,5 @@ +from itertools import chain + import troposphere.ec2 as ec2 import troposphere.iam as iam from troposphere import Base64, FindInMap, Join, Output, Parameter, Ref, Tags @@ -127,6 +129,7 @@ # Elastic IP for EC2 instance eip = template.add_resource(ec2.EIP("Eip")) + # The Dokku EC2 instance ec2_instance = template.add_resource(ec2.Instance( 'Ec2Instance', @@ -156,8 +159,8 @@ ' DOKKU_KEY_FILE=/home/ubuntu/.ssh/authorized_keys', # use the key configured by key_name ' bash bootstrap.sh', '\n', - 'sudo -u dokku dokku config:set --global' - ] + [' %s=%s' % env_pair for env_pair in environment_variables] + ['\n'] + 'dokku config:set --global', + ] + list(chain(*[(' %s=' % k, v) for k, v in environment_variables])) + ['\n'] )), Tags=Tags( Name=Ref("AWS::StackName"), From ca468b15754f1ce3f0458e3ac70e252d819d7e7f Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 20 Sep 2017 00:26:20 -0400 Subject: [PATCH 08/20] automatically update Dokku global environment variables when stack metadata changes --- stack/dokku.py | 123 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 108 insertions(+), 15 deletions(-) diff --git a/stack/dokku.py b/stack/dokku.py index be73b5b..b71833f 100644 --- a/stack/dokku.py +++ 
b/stack/dokku.py @@ -1,8 +1,8 @@ -from itertools import chain - +import troposphere.cloudformation as cloudformation import troposphere.ec2 as ec2 import troposphere.iam as iam from troposphere import Base64, FindInMap, Join, Output, Parameter, Ref, Tags +from troposphere.policies import CreationPolicy, ResourceSignal from .assets import assets_management_policy from .common import container_instance_type @@ -131,8 +131,9 @@ # The Dokku EC2 instance +ec2_instance_name = 'Ec2Instance' ec2_instance = template.add_resource(ec2.Instance( - 'Ec2Instance', + ec2_instance_name, ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), InstanceType=container_instance_type, KeyName=Ref(key_name), @@ -147,21 +148,113 @@ ) ), ], + CreationPolicy=CreationPolicy( + ResourceSignal=ResourceSignal( + Timeout='PT10M', # 10 minutes + ), + ), UserData=Base64(Join('', [ '#!/bin/bash\n', + # install cfn helper scripts; modified from: + # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-helper-scripts-reference.html 'sudo apt-get update\n', - 'wget https://raw.githubusercontent.com/dokku/dokku/', Ref(dokku_version), '/bootstrap.sh\n', - 'sudo', - ' DOKKU_TAG=', Ref(dokku_version), - ' DOKKU_VHOST_ENABLE=', Ref(dokku_vhost_enable), - ' DOKKU_WEB_CONFIG=', Ref(dokku_web_config), - ' DOKKU_HOSTNAME=', domain_name, - ' DOKKU_KEY_FILE=/home/ubuntu/.ssh/authorized_keys', # use the key configured by key_name - ' bash bootstrap.sh', - '\n', - 'dokku config:set --global', - ] + list(chain(*[(' %s=' % k, v) for k, v in environment_variables])) + ['\n'] - )), + 'apt-get -y install python-pip\n', + 'pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n', + 'cp /usr/local/init/ubuntu/cfn-hup /etc/init.d/cfn-hup\n', + 'chmod +x /etc/init.d/cfn-hup\n', + # don't start cfn-hup yet, since we need to install cfn-hup.conf first + 'update-rc.d cfn-hup defaults\n', + # call our "on_first_boot" configset (defined below): + 'cfn-init --stack="', 
Ref('AWS::StackName'), '" --region=', Ref('AWS::Region'), + ' -r %s -c on_first_boot\n' % ec2_instance_name, + # send the exit code from cfn-init to our CreationPolicy: + 'cfn-signal -e $? --stack="', Ref('AWS::StackName'), '" --region=', Ref('AWS::Region'), + ' --resource %s\n' % ec2_instance_name, + ])), + Metadata=cloudformation.Metadata( + cloudformation.Init( + cloudformation.InitConfigSets( + on_first_boot=['install_dokku', 'set_dokku_env', 'start_cfn_hup'], + on_metadata_update=['set_dokku_env'], + ), + # TODO: figure out how to reinstall Dokku if the version is changed (?) + install_dokku=cloudformation.InitConfig( + commands={ + '01_fetch': { + 'command': Join('', [ + 'wget https://raw.githubusercontent.com/dokku/dokku/', + Ref(dokku_version), + '/bootstrap.sh', + ]), + 'cwd': '~', + }, + '02_install': { + # docker-ce fails to install with this error if bootstrap.sh is run without sudo: + # "debconf: delaying package configuration, since apt-utils is not installed" + 'command': 'sudo -E bash bootstrap.sh', # use -E to make sure bash gets our env + 'env': { + 'DOKKU_TAG': Ref(dokku_version), + 'DOKKU_VHOST_ENABLE': Ref(dokku_vhost_enable), + 'DOKKU_WEB_CONFIG': Ref(dokku_web_config), + 'DOKKU_HOSTNAME': domain_name, + 'DOKKU_KEY_FILE': '/home/ubuntu/.ssh/authorized_keys', # use the key configured by key_name + }, + 'cwd': '~', + }, + }, + ), + set_dokku_env=cloudformation.InitConfig( + commands={ + '01_set_env': { + # redirect output to /dev/null so we don't write environment variables to log file + 'command': 'dokku config:set --global {} >/dev/null'.format( + ' '.join(['=$'.join([k, k]) for k in dict(environment_variables).keys()]), + ), + 'env': dict(environment_variables), + }, + }, + ), + start_cfn_hup=cloudformation.InitConfig( + commands={ + '01_start': { + 'command': 'service cfn-hup start', + }, + }, + files={ + '/etc/cfn/cfn-hup.conf': { + 'content': Join('', [ + '[main]\n', + 'stack=', Ref('AWS::StackName'), '\n', + 'region=', Ref('AWS::Region'), 
'\n', + 'umask=022\n', + 'interval=1\n', # check for changes every minute + 'verbose=true\n', + ]), + 'mode': '000400', + 'owner': 'root', + 'group': 'root', + }, + '/etc/cfn/hooks.d/cfn-auto-reloader.conf': { + 'content': Join('', [ + # trigger the on_metadata_update configset on any changes to Ec2Instance metadata + '[cfn-auto-reloader-hook]\n', + 'triggers=post.update\n', + 'path=Resources.%s.Metadata\n' % ec2_instance_name, + 'action=/usr/local/bin/cfn-init', + ' --stack=', Ref('AWS::StackName'), + ' --resource=%s' % ec2_instance_name, + ' --configsets=on_metadata_update', + ' --region=', Ref('AWS::Region'), '\n', + 'runas=root\n', + ]), + 'mode': '000400', + 'owner': 'root', + 'group': 'root', + }, + }, + ), + ), + ), Tags=Tags( Name=Ref("AWS::StackName"), ), From 28ec7349a90b41f674a60a4a36e287d3d85c4e1f Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 20 Sep 2017 00:32:25 -0400 Subject: [PATCH 09/20] add note to README about Dokku env updates --- README.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.rst b/README.rst index 62a355f..cffc345 100644 --- a/README.rst +++ b/README.rst @@ -344,6 +344,15 @@ via Amazon Certificate Manager, so let's create one with Let's Encrypt instance: The Python sample app should now be accessible over HTTPS at https://python-sample.your.domain/ +Updating Environment Variables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If metadata associated with the Dokku EC2 instance changes, updates to environment variables, if +any, will be passed to the live server via `cfn-hup +`_. Depending on the +nature of the update this may or may not result the instance being stopped and restarted. Inspect +the stack update confirmation page carefully to avoid any unexpected instance recreations. 
+nature of the update this may or may not result in the instance being stopped and restarted. Inspect
Once installation -is complete, create a new app on the remote server:: +You can create a new app on the remote server like so:: ssh dokku@ apps:create python-sample -and deploy Heroku's Python sample to that app:: +and then deploy Heroku's Python sample to that app:: git clone https://github.com/heroku/python-sample.git cd python-sample @@ -344,15 +359,6 @@ via Amazon Certificate Manager, so let's create one with Let's Encrypt instance: The Python sample app should now be accessible over HTTPS at https://python-sample.your.domain/ -Updating Environment Variables -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If metadata associated with the Dokku EC2 instance changes, updates to environment variables, if -any, will be passed to the live server via `cfn-hup -`_. Depending on the -nature of the update this may or may not result the instance being stopped and restarted. Inspect -the stack update confirmation page carefully to avoid any unexpected instance recreations. - Contributing ------------ From 6e634bd42e0d53d165d0acd429dd3ef1892a2f51 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Thu, 21 Sep 2017 09:43:31 -0400 Subject: [PATCH 11/20] clarify Dokku SSH key usage in README and add a line for renewing your Lets Encrypt cert --- README.rst | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index d7a6fba..05286a4 100644 --- a/README.rst +++ b/README.rst @@ -300,6 +300,10 @@ console. Dokku ----- +When creating a Dokku stack, you may find it advantageous to upload your normal SSH public key to +AWS, rather than using one that AWS generates. This way, you'll already be set up to deploy to your +Dokku instance without needing to keep track of an extra SSH private key. + The CloudFormation stack creation should not finish until Dokku is fully installed; `cfn-signal `_ is used in the template to signal CloudFormation once the installation is complete. 
Subject: [PATCH 12/20] correct volumne -> volume
Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Thu, 21 Sep 2017 09:45:32 -0400 Subject: [PATCH 13/20] remove unnecessary sudo --- stack/dokku.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack/dokku.py b/stack/dokku.py index 9816c2f..921cf1b 100644 --- a/stack/dokku.py +++ b/stack/dokku.py @@ -157,7 +157,7 @@ '#!/bin/bash\n', # install cfn helper scripts; modified from: # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-helper-scripts-reference.html - 'sudo apt-get update\n', + 'apt-get update\n', 'apt-get -y install python-pip\n', 'pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n', 'cp /usr/local/init/ubuntu/cfn-hup /etc/init.d/cfn-hup\n', From 30e7f17594324c30e948da8fc02ad7f745637273 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 27 Sep 2017 09:15:03 -0400 Subject: [PATCH 14/20] make sure DOKKU_SKIP_KEY_FILE=false --- stack/dokku.py | 1 + 1 file changed, 1 insertion(+) diff --git a/stack/dokku.py b/stack/dokku.py index 921cf1b..aaa89ba 100644 --- a/stack/dokku.py +++ b/stack/dokku.py @@ -198,6 +198,7 @@ 'DOKKU_WEB_CONFIG': Ref(dokku_web_config), 'DOKKU_HOSTNAME': domain_name, 'DOKKU_KEY_FILE': '/home/ubuntu/.ssh/authorized_keys', # use the key configured by key_name + 'DOKKU_SKIP_KEY_FILE': 'false', # should be the default, but be explicit just in case }, 'cwd': '~', }, From 301e8bb57e1f8c77f26f94f11b67d899251e14ff Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 27 Sep 2017 09:18:45 -0400 Subject: [PATCH 15/20] correct README notes about Dokku SSH key usage --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 05286a4..7a23984 100644 --- a/README.rst +++ b/README.rst @@ -334,8 +334,8 @@ Deployment ~~~~~~~~~~ You can create a new app on the remote server like so, using the same SSH key that you specified -during the stack creation process (if you did use your default public key, add ``-i 
/path/to/key.pub`` -to this command):: +during the stack creation process (if you didn't use your shell's default SSH key, you'll need to +add ``-i /path/to/key.pub`` to this command):: ssh dokku@ apps:create python-sample From 5d26bea815ef53240a3b75417284a56355384999 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 27 Sep 2017 09:23:22 -0400 Subject: [PATCH 16/20] need to provide the private key, not public key, when SSHing to the Dokku instance --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 7a23984..9ea2d54 100644 --- a/README.rst +++ b/README.rst @@ -335,7 +335,7 @@ Deployment You can create a new app on the remote server like so, using the same SSH key that you specified during the stack creation process (if you didn't use your shell's default SSH key, you'll need to -add ``-i /path/to/key.pub`` to this command):: +add ``-i /path/to/private_key`` to this command):: ssh dokku@ apps:create python-sample From 7059424c4b252a0dce8e47584a4e09754fcab30e Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 27 Sep 2017 10:47:16 -0400 Subject: [PATCH 17/20] in README, note the value of *_URL environment variables if no instance is created, correct name of CACHE_URL, and add missing AWS_REGION --- README.rst | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/README.rst b/README.rst index 5a5454a..c854944 100644 --- a/README.rst +++ b/README.rst @@ -169,6 +169,7 @@ Once your environment is created you'll have an Elastic Beanstalk (EB) or Elasti (ECS) environment with the environment variables you need to run a containerized web application. These environment variables are: +* ``AWS_REGION``: The AWS region in which your stack was created. 
* ``AWS_STORAGE_BUCKET_NAME``: The name of the S3 bucket in which your application should store static assets * ``AWS_PRIVATE_STORAGE_BUCKET_NAME``: The name of the S3 bucket in which your application should @@ -176,7 +177,9 @@ These environment variables are: authentication to read objects and encrypt them at rest, if needed. * ``CDN_DOMAIN_NAME``: The domain name of the CloudFront distribution connected to the above S3 bucket; you should use this (or the S3 bucket URL directly) to refer to static assets in your HTML -* ``ELASTICSEARCH_ENDPOINT``: The domain name of the Elasticsearch instance. +* ``ELASTICSEARCH_ENDPOINT``: The domain name of the Elasticsearch instance. If ``(none)`` is selected + for the ``ElasticsearchInstanceType`` during stack creation, the value of this variable will be + ``'none-created'``. * ``ELASTICSEARCH_PORT``: The recommended port for connecting to Elasticsearch (defaults to 443). * ``ELASTICSEARCH_USE_SSL``: Whether or not to use SSL (defaults to ``'on'``). * ``ELASTICSEARCH_VERIFY_CERTS``: Whether or not to verify Elasticsearch SSL certificates. This @@ -189,11 +192,14 @@ These environment variables are: stack. These domains, if any, will also be included in the automatically-generated SSL certificate and S3 CORS configuration. * ``SECRET_KEY``: The secret key you specified when creating this stack -* ``DATABASE_URL``: The URL to the RDS instance created as part of this stack. -* ``REDIS_URL``: The URL to the Redis instance created as part of this stack (may be used as a cache - or session storage, e.g.). Note that Redis supports multiple databases and no database ID is - included as part of the URL, so you should append a forward slash and the integer index of the - database, if needed, e.g., ``/0``. +* ``DATABASE_URL``: The URL to the RDS instance created as part of this stack. If ``(none)`` is + selected for the ``DatabaseClass`` during stack creation, the value of this variable will be + ``'none-created'``. 
+* ``CACHE_URL``: The URL to the Redis or Memcached instance created as part of this stack (may be + used as a cache or session storage, e.g.). If using Redis, note that it supports multiple + databases and no database ID is included as part of the URL, so you should append a forward slash + and the integer index of the database, if needed, e.g., ``/0``. If ``(none)`` is selected for the + ``CacheNodeType`` during stack creation, the value of this variable will be ``'none-created'``. When running an EB stack, you can view and edit the keys and values for all environment variables on the fly via the Elastic Beanstalk console or command line tools. From 12a73f861c1f453bf9163dcf9f286e6f88b5479b Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 27 Sep 2017 10:53:52 -0400 Subject: [PATCH 18/20] add changelog entry for 1.2.0 --- CHANGELOG.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 472ac22..f2d732a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,21 @@ Change Log ========== +`1.2.0`_ (2017-09-27) +--------------------- + +Features: + +* The RDS instance, Elasticache instance, and Elasticsearch instance can all now be optionally + removed from the stack by setting the instance type to ``(none)`` in the relevant CloudFormation + parameter. +* Support for using a single `Dokku `_ instance as an application + server was added. Dokku is installed automatically on an Ubuntu 16.04 LTS instance, configured + with the options selected via CloudFormation parameters, and provided the environment variables + needed to access the related resources (such as the database, cache, or Elasticsearch instance) + created with this stack. For more information, please see `the README + `_. 
+ `1.1.2`_ (2017-09-26) --------------------- From cbd66fdb7bf9fd9e9f7aa380a05efe8616237241 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 27 Sep 2017 10:54:04 -0400 Subject: [PATCH 19/20] version bump to 1.2.0 --- version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.txt b/version.txt index 45a1b3f..26aaba0 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -1.1.2 +1.2.0 From 04bb3b4ac7fc067e89c48f523a201fb6c6654242 Mon Sep 17 00:00:00 2001 From: Tobias McNulty Date: Wed, 27 Sep 2017 11:53:01 -0400 Subject: [PATCH 20/20] the 'C' in ElastiCache is capitalized --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f2d732a..2caea3e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,7 +7,7 @@ Change Log Features: -* The RDS instance, Elasticache instance, and Elasticsearch instance can all now be optionally +* The RDS instance, ElastiCache instance, and Elasticsearch instance can all now be optionally removed from the stack by setting the instance type to ``(none)`` in the relevant CloudFormation parameter. * Support for using a single `Dokku `_ instance as an application