diff --git a/aws-py-appsync/__main__.py b/aws-py-appsync/__main__.py index f3fac2163..0c06b1832 100644 --- a/aws-py-appsync/__main__.py +++ b/aws-py-appsync/__main__.py @@ -7,10 +7,10 @@ table = dynamodb.Table( "tenants", hash_key="id", - attributes=[{ - "name": "id", - "type": "S" - }], + attributes=[dynamodb.TableAttributeArgs( + name="id", + type="S" + )], read_capacity=1, write_capacity=1) @@ -44,7 +44,7 @@ attachment = iam.RolePolicyAttachment( "iam-policy-attachment", - role=role, + role=role.name, policy_arn=policy.arn) ## GraphQL Schema @@ -82,8 +82,8 @@ random_string = random.RandomString( "random-datasource-name", length=15, - special="false", - number="false", + special=False, + number=False, ) ## Link a data source to the Dynamo DB Table @@ -92,9 +92,9 @@ name=random_string.result, api_id=api.id, type="AMAZON_DYNAMODB", - dynamodb_config={ - "table_name": table.name - }, + dynamodb_config=appsync.DataSourceDynamodbConfigArgs( + table_name=table.name, + ), service_role_arn=role.arn) ## A resolver for the [getTenantById] query diff --git a/aws-py-ec2-provisioners/__main__.py b/aws-py-ec2-provisioners/__main__.py index 15f19c371..d48ada90d 100644 --- a/aws-py-ec2-provisioners/__main__.py +++ b/aws-py-ec2-provisioners/__main__.py @@ -25,8 +25,8 @@ def decode_key(key): secgrp = aws.ec2.SecurityGroup('secgrp', description='Foo', ingress=[ - { 'protocol': 'tcp', 'from_port': 22, 'to_port': 22, 'cidr_blocks': ['0.0.0.0/0'] }, - { 'protocol': 'tcp', 'from_port': 80, 'to_port': 80, 'cidr_blocks': ['0.0.0.0/0'] }, + aws.ec2.SecurityGroupIngressArgs(protocol='tcp', from_port=22, to_port=22, cidr_blocks=['0.0.0.0/0']), + aws.ec2.SecurityGroupIngressArgs(protocol='tcp', from_port=80, to_port=80, cidr_blocks=['0.0.0.0/0']), ], ) @@ -34,10 +34,10 @@ def decode_key(key): ami = aws.get_ami( owners=['amazon'], most_recent=True, - filters=[{ - 'name': 'name', - 'values': ['amzn2-ami-hvm-2.0.????????-x86_64-gp2'], - }], + filters=[aws.GetAmiFilterArgs( + name='name', + 
values=['amzn2-ami-hvm-2.0.????????-x86_64-gp2'], + )], ) # Create an EC2 server that we'll then provision stuff onto. diff --git a/aws-py-eks/__main__.py b/aws-py-eks/__main__.py index b388b0a98..a37449afa 100644 --- a/aws-py-eks/__main__.py +++ b/aws-py-eks/__main__.py @@ -8,12 +8,14 @@ eks_cluster = eks.Cluster( 'eks-cluster', role_arn=iam.eks_role.arn, - tags= {'Name':'pulumi-eks-cluster'}, - vpc_config = { - 'publicAccessCidrs': ['0.0.0.0/0'], - 'security_group_ids': [vpc.eks_security_group.id], - 'subnet_ids': vpc.subnet_ids, - } + tags={ + 'Name': 'pulumi-eks-cluster', + }, + vpc_config=eks.ClusterVpcConfigArgs( + public_access_cidrs=['0.0.0.0/0'], + security_group_ids=[vpc.eks_security_group.id], + subnet_ids=vpc.subnet_ids, + ), ) eks_node_group = eks.NodeGroup( @@ -23,13 +25,13 @@ node_role_arn=iam.ec2_role.arn, subnet_ids=vpc.subnet_ids, tags={ - 'Name' : 'pulumi-cluster-nodeGroup' + 'Name': 'pulumi-cluster-nodeGroup', }, - scaling_config = { - 'desired_size': 2, - 'max_size': 2, - 'min_size': 1, - }, + scaling_config=eks.NodeGroupScalingConfigArgs( + desired_size=2, + max_size=2, + min_size=1, + ), ) pulumi.export('cluster-name', eks_cluster.name) diff --git a/aws-py-eks/iam.py b/aws-py-eks/iam.py index 0f469dd6c..43049ef8d 100644 --- a/aws-py-eks/iam.py +++ b/aws-py-eks/iam.py @@ -5,7 +5,6 @@ eks_role = iam.Role( 'eks-iam-role', - assume_role_policy=json.dumps({ 'Version': '2012-10-17', 'Statement': [ @@ -17,21 +16,21 @@ 'Effect': 'Allow', 'Sid': '' } - ] - }) + ], + }), ) iam.RolePolicyAttachment( 'eks-service-policy-attachment', role=eks_role.id, - policy_arn='arn:aws:iam::aws:policy/AmazonEKSServicePolicy' - ) + policy_arn='arn:aws:iam::aws:policy/AmazonEKSServicePolicy', +) iam.RolePolicyAttachment( 'eks-cluster-policy-attachment', role=eks_role.id, - policy_arn='arn:aws:iam::aws:policy/AmazonEKSClusterPolicy' + policy_arn='arn:aws:iam::aws:policy/AmazonEKSClusterPolicy', ) ## Ec2 NodeGroup Role @@ -49,25 +48,25 @@ 'Effect': 'Allow', 'Sid': '' } - 
] - }) + ], + }), ) iam.RolePolicyAttachment( 'eks-workernode-policy-attachment', role=ec2_role.id, - policy_arn='arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy' + policy_arn='arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy', ) iam.RolePolicyAttachment( 'eks-cni-policy-attachment', role=ec2_role.id, - policy_arn='arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy' + policy_arn='arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy', ) iam.RolePolicyAttachment( 'ec2-container-ro-policy-attachment', role=ec2_role.id, - policy_arn='arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly' + policy_arn='arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly', ) diff --git a/aws-py-eks/vpc.py b/aws-py-eks/vpc.py index 1dd9dd684..eed84e055 100644 --- a/aws-py-eks/vpc.py +++ b/aws-py-eks/vpc.py @@ -1,37 +1,36 @@ -from pulumi_aws import config, ec2, get_availability_zones +from pulumi_aws import ec2, get_availability_zones ## VPC vpc = ec2.Vpc( - 'eks-vpc', + 'eks-vpc', cidr_block='10.100.0.0/16', instance_tenancy='default', enable_dns_hostnames=True, enable_dns_support=True, tags={ - 'Name' : 'pulumi-eks-vpc' - } + 'Name': 'pulumi-eks-vpc', + }, ) igw = ec2.InternetGateway( 'vpc-ig', vpc_id=vpc.id, tags={ - 'Name' : 'pulumi-vpc-ig' - } + 'Name': 'pulumi-vpc-ig', + }, ) eks_route_table = ec2.RouteTable( 'vpc-route-table', vpc_id=vpc.id, - routes=[{ - 'cidr_block' : '0.0.0.0/0', - 'gateway_id' : igw.id - } - ], + routes=[ec2.RouteTableRouteArgs( + cidr_block='0.0.0.0/0', + gateway_id=igw.id, + )], tags={ - 'Name' : 'pulumi-vpc-rt' - } + 'Name': 'pulumi-vpc-rt', + }, ) ## Subnets, one for each AZ in a region @@ -41,15 +40,15 @@ for zone in zones.names: vpc_subnet = ec2.Subnet( - f'vpc-subnet-{zone}' , + f'vpc-subnet-{zone}', assign_ipv6_address_on_creation=False, vpc_id=vpc.id, map_public_ip_on_launch=True, cidr_block=f'10.100.{len(subnet_ids)}.0/24', - availability_zone= zone, + availability_zone=zone, tags={ - 'Name' : f'pulumi-sn-{zone}' - } + 'Name': f'pulumi-sn-{zone}', + 
}, ) ec2.RouteTableAssociation( f'vpc-route-table-assoc-{zone}', @@ -65,21 +64,22 @@ vpc_id=vpc.id, description='Allow all HTTP(s) traffic to EKS Cluster', tags={ - 'Name' : 'pulumi-cluster-sg' + 'Name': 'pulumi-cluster-sg', }, - ingress=[{ - 'cidr_blocks' : ['0.0.0.0/0'], - 'from_port' : '443', - 'to_port' : '443', - 'protocol' : 'tcp', - 'description' : 'Allow pods to communicate with the cluster API Server.' - }, - { - 'cidr_blocks' : ['0.0.0.0/0'], - 'from_port' : '80', - 'to_port' : '80', - 'protocol' : 'tcp', - 'description' : 'Allow internet access to pods' - } - ] + ingress=[ + ec2.SecurityGroupIngressArgs( + cidr_blocks=['0.0.0.0/0'], + from_port=443, + to_port=443, + protocol='tcp', + description='Allow pods to communicate with the cluster API Server.' + ), + ec2.SecurityGroupIngressArgs( + cidr_blocks=['0.0.0.0/0'], + from_port=80, + to_port=80, + protocol='tcp', + description='Allow internet access to pods' + ), + ], ) diff --git a/aws-py-fargate/__main__.py b/aws-py-fargate/__main__.py index d42884fc2..b01d31bbb 100644 --- a/aws-py-fargate/__main__.py +++ b/aws-py-fargate/__main__.py @@ -6,47 +6,47 @@ cluster = aws.ecs.Cluster('cluster') # Read back the default VPC and public subnets, which we will use. -default_vpc = aws.ec2.get_vpc(default='true') +default_vpc = aws.ec2.get_vpc(default=True) default_vpc_subnets = aws.ec2.get_subnet_ids(vpc_id=default_vpc.id) # Create a SecurityGroup that permits HTTP ingress and unrestricted egress. 
group = aws.ec2.SecurityGroup('web-secgrp', vpc_id=default_vpc.id, description='Enable HTTP access', - ingress=[{ - 'protocol': 'tcp', - 'from_port': 80, - 'to_port': 80, - 'cidr_blocks': ['0.0.0.0/0'], - }], - egress=[{ - 'protocol': '-1', - 'from_port': 0, - 'to_port': 0, - 'cidr_blocks': ['0.0.0.0/0'], - }] + ingress=[aws.ec2.SecurityGroupIngressArgs( + protocol='tcp', + from_port=80, + to_port=80, + cidr_blocks=['0.0.0.0/0'], + )], + egress=[aws.ec2.SecurityGroupEgressArgs( + protocol='-1', + from_port=0, + to_port=0, + cidr_blocks=['0.0.0.0/0'], + )], ) # Create a load balancer to listen for HTTP traffic on port 80. alb = aws.lb.LoadBalancer('app-lb', security_groups=[group.id], - subnets=default_vpc_subnets.ids + subnets=default_vpc_subnets.ids, ) atg = aws.lb.TargetGroup('app-tg', port=80, protocol='HTTP', target_type='ip', - vpc_id=default_vpc.id + vpc_id=default_vpc.id, ) wl = aws.lb.Listener('web', load_balancer_arn=alb.arn, port=80, - default_actions=[{ - 'type': 'forward', - 'target_group_arn': atg.arn - }] + default_actions=[aws.lb.ListenerDefaultActionArgs( + type='forward', + target_group_arn=atg.arn, + )], ) # Create an IAM role that can be used by our service's task. @@ -61,12 +61,12 @@ }, 'Action': 'sts:AssumeRole', }] - }) + }), ) rpa = aws.iam.RolePolicyAttachment('task-exec-policy', role=role.name, - policy_arn='arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy' + policy_arn='arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy', ) # Spin up a load balanced service running our container image. 
@@ -93,16 +93,16 @@ desired_count=3, launch_type='FARGATE', task_definition=task_definition.arn, - network_configuration={ - 'assign_public_ip': 'true', - 'subnets': default_vpc_subnets.ids, - 'security_groups': [group.id] - }, - load_balancers=[{ - 'target_group_arn': atg.arn, - 'container_name': 'my-app', - 'container_port': 80 - }], + network_configuration=aws.ecs.ServiceNetworkConfigurationArgs( + assign_public_ip=True, + subnets=default_vpc_subnets.ids, + security_groups=[group.id], + ), + load_balancers=[aws.ecs.ServiceLoadBalancerArgs( + target_group_arn=atg.arn, + container_name='my-app', + container_port=80, + )], opts=ResourceOptions(depends_on=[wl]), ) diff --git a/aws-py-resources/__main__.py b/aws-py-resources/__main__.py index 071105fe1..9bf706840 100644 --- a/aws-py-resources/__main__.py +++ b/aws-py-resources/__main__.py @@ -24,11 +24,11 @@ "mylogmetricfilter", pattern="", log_group_name=log_group.name, - metric_transformation={ - "name": "EventCount", - "namespace": "YourNamespace", - "value": 1, - }) + metric_transformation=cloudwatch.LogMetricFilterMetricTransformationArgs( + name="EventCount", + namespace="YourNamespace", + value="1", + )) log_stream = cloudwatch.LogStream( "mylogstream", @@ -48,10 +48,10 @@ ## DynamoDB db = dynamodb.Table( "mytable", - attributes=[{ - "name": "Id", - "type": "S" - }], + attributes=[dynamodb.TableAttributeArgs( + name="Id", + type="S", + )], hash_key="Id", read_capacity=1, write_capacity=1) @@ -61,12 +61,12 @@ security_group = ec2.SecurityGroup( "mysecuritygroup", - ingress=[{ - "protocol": "tcp", - "from_port": 80, - "to_port": 80, - "cidr_blocks": ["0.0.0.0/0"] - }]) + ingress=[ec2.SecurityGroupIngressArgs( + protocol="tcp", + from_port=80, + to_port=80, + cidr_blocks=["0.0.0.0/0"] + )]) vpc = ec2.Vpc( "myvpc", @@ -78,10 +78,10 @@ public_route_table = ec2.RouteTable( "myroutetable", - routes=[{ - "cidr_block": "0.0.0.0/0", - "gateway_id": igw.id - }], + routes=[ec2.RouteTableRouteArgs( + 
cidr_block="0.0.0.0/0", + gateway_id=igw.id + )], vpc_id=vpc.id) ## ECR @@ -200,6 +200,6 @@ topic_subscription = sns.TopicSubscription( "mytopicsubscription", - topic=topic, + topic=topic.arn, protocol="sqs", endpoint=queue.arn) diff --git a/aws-py-s3-folder/__main__.py b/aws-py-s3-folder/__main__.py index 3401a93ef..89b88de2a 100644 --- a/aws-py-s3-folder/__main__.py +++ b/aws-py-s3-folder/__main__.py @@ -5,9 +5,10 @@ from pulumi import export, FileAsset from pulumi_aws import s3 -web_bucket = s3.Bucket('s3-website-bucket', website={ - "index_document": "index.html" -}) +web_bucket = s3.Bucket('s3-website-bucket', + website=s3.BucketWebsiteArgs( + index_document="index.html", + )) content_dir = "www" for file in os.listdir(content_dir): @@ -39,5 +40,5 @@ def public_read_policy_for_bucket(bucket_name): policy=bucket_name.apply(public_read_policy_for_bucket)) # Export the name of the bucket -export('bucket_name', web_bucket.id) +export('bucket_name', web_bucket.id) export('website_url', web_bucket.website_endpoint) diff --git a/aws-py-serverless-raw/__main__.py b/aws-py-serverless-raw/__main__.py index 9f8398cdb..9d151c7f1 100644 --- a/aws-py-serverless-raw/__main__.py +++ b/aws-py-serverless-raw/__main__.py @@ -1,9 +1,8 @@ """Copyright 2016-2019, Pulumi Corporation. 
All rights reserved.""" +import json + import pulumi import pulumi_aws as aws -import pulumi_aws.config -from pulumi import Output -import json # The location of the built dotnet3.1 application to deploy dotnet_application_publish_folder = "./app/bin/Debug/netcoreapp3.1/publish" @@ -11,6 +10,8 @@ # The stage name to use for the API Gateway URL custom_stage_name = "api" +region = aws.config.region + ################# ## DynamoDB Table ################# @@ -18,15 +19,15 @@ # A DynamoDB table with a single primary key counter_table = aws.dynamodb.Table("counterTable", attributes=[ - { - "name": "Id", - "type": "S", - }, + aws.dynamodb.TableAttributeArgs( + name="Id", + type="S", + ), ], hash_key="Id", read_capacity=1, write_capacity=1, - ) +) ################## ## Lambda Function @@ -34,34 +35,34 @@ # Give our Lambda access to the Dynamo DB table, CloudWatch Logs and Metrics # Python package does not have assumeRolePolicyForPrinciple -instance_assume_role_policy = aws.iam.get_policy_document(statements=[{ - "actions": ["sts:AssumeRole"], - "principals": [{ - "identifiers": ["lambda.amazonaws.com"], - "type": "Service", - }], -}]) +instance_assume_role_policy = aws.iam.get_policy_document( + statements=[aws.iam.GetPolicyDocumentStatementArgs( + actions=["sts:AssumeRole"], + principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs( + identifiers=["lambda.amazonaws.com"], + type="Service", + )], + )]) role = aws.iam.Role("mylambda-role", assume_role_policy=instance_assume_role_policy.json, - ) +) policy = aws.iam.RolePolicy("mylambda-policy", - role=role, - policy=Output.from_input({ + role=role.id, + policy=counter_table.arn.apply(lambda arn: json.dumps({ "Version": "2012-10-17", "Statement": [{ "Action": ["dynamodb:UpdateItem", "dynamodb:PutItem", "dynamodb:GetItem", "dynamodb:DescribeTable"], - "Resource": counter_table.arn, + "Resource": arn, "Effect": "Allow", }, { "Action": ["logs:*", "cloudwatch:*"], "Resource": "*", "Effect": "Allow", }], - }), - ) + }))) # 
Read the config of whether to provision fixed concurrency for Lambda config = pulumi.Config() @@ -80,18 +81,18 @@ role=role.arn, publish=bool(provisioned_concurrent_executions), # Versioning required for provisioned concurrency - environment={ - "variables": { + environment=aws.lambda_.FunctionEnvironmentArgs( + variables={ "COUNTER_TABLE": counter_table.name, }, - }, - ) + ), +) if provisioned_concurrent_executions: concurrency = aws.lambda_.ProvisionedConcurrencyConfig("concurrency", - function_name=lambda_func.name, - qualifier=lambda_func.version, - provisioned_concurrent_executions=provisioned_concurrent_executions, + function_name=lambda_func.name, + qualifier=lambda_func.version, + provisioned_concurrent_executions=provisioned_concurrent_executions ) @@ -99,27 +100,12 @@ ## APIGateway RestAPI ###################### -# Create the Swagger spec for a proxy which forwards all HTTP requests through to the Lambda function. -def swagger_spec(lambda_arn): - swagger_spec_returns = { - "swagger": "2.0", - "info": {"title": "api", "version": "1.0"}, - "paths": { - "/{proxy+}": swagger_route_handler(lambda_arn), - }, - } - return json.dumps(swagger_spec_returns) - - # Create a single Swagger spec route handler for a Lambda function. -def swagger_route_handler(lambda_arn): - region = pulumi_aws.config.region - uri_string = 'arn:aws:apigateway:{region}:lambda:path/2015-03-31/functions/{lambdaArn}/invocations'.format( - region=region, lambdaArn=lambda_arn) +def swagger_route_handler(arn): return ({ "x-amazon-apigateway-any-method": { "x-amazon-apigateway-integration": { - "uri": uri_string, + "uri": f'arn:aws:apigateway:{region}:lambda:path/2015-03-31/functions/{arn}/invocations', "passthroughBehavior": "when_no_match", "httpMethod": "POST", "type": "aws_proxy", @@ -127,34 +113,38 @@ def swagger_route_handler(lambda_arn): }, }) - # Create the API Gateway Rest API, using a swagger spec. 
rest_api = aws.apigateway.RestApi("api", - body=lambda_func.arn.apply(lambda lambda_arn: swagger_spec(lambda_arn)), - ) + body=lambda_func.arn.apply(lambda arn: json.dumps({ + "swagger": "2.0", + "info": {"title": "api", "version": "1.0"}, + "paths": { + "/{proxy+}": swagger_route_handler(arn), + }, + }))) # Create a deployment of the Rest API. deployment = aws.apigateway.Deployment("api-deployment", - rest_api=rest_api, + rest_api=rest_api.id, # Note: Set to empty to avoid creating an implicit stage, we'll create it # explicitly below instead. - stage_name="") + stage_name="", +) # Create a stage, which is an addressable instance of the Rest API. Set it to point at the latest deployment. stage = aws.apigateway.Stage("api-stage", - rest_api=rest_api, - deployment=deployment, + rest_api=rest_api.id, + deployment=deployment.id, stage_name=custom_stage_name, - ) +) # Give permissions from API Gateway to invoke the Lambda invoke_permission = aws.lambda_.Permission("api-lambda-permission", action="lambda:invokeFunction", - function=lambda_func, + function=lambda_func.name, principal="apigateway.amazonaws.com", - source_arn=deployment.execution_arn.apply( - lambda execution_arn: execution_arn + "*/*"), - ) + source_arn=deployment.execution_arn.apply(lambda arn: arn + "*/*"), +) # Export the https endpoint of the running Rest API pulumi.export("endpoint", deployment.invoke_url.apply(lambda url: url + custom_stage_name)) diff --git a/aws-py-stackreference/team/__main__.py b/aws-py-stackreference/team/__main__.py index e586b30b3..eb9c836fa 100644 --- a/aws-py-stackreference/team/__main__.py +++ b/aws-py-stackreference/team/__main__.py @@ -1,5 +1,5 @@ from pulumi import StackReference, Config, export -from pulumi_aws import get_ami, ec2 +from pulumi_aws import get_ami, ec2, GetAmiFilterArgs config = Config() company_stack = StackReference(config.require("companyStack")) @@ -13,13 +13,13 @@ } ami_id = get_ami( - most_recent="true", + most_recent=True, owners=["099720109477"], 
filters=[ - { - "name":"name", - "values":["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] - }] + GetAmiFilterArgs( + name="name", + values=["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] + )] ).id instance = ec2.Instance( diff --git a/aws-py-static-website/__main__.py b/aws-py-static-website/__main__.py index 7224774c1..19f95700f 100644 --- a/aws-py-static-website/__main__.py +++ b/aws-py-static-website/__main__.py @@ -34,10 +34,10 @@ def get_domain_and_subdomain(domain): content_bucket = pulumi_aws.s3.Bucket('contentBucket', bucket=target_domain, acl='public-read', - website={ - 'index_document': 'index.html', - 'error_document': '404.html' - }) + website=pulumi_aws.s3.BucketWebsiteArgs( + index_document='index.html', + error_document='404.html' + )) def crawl_directory(content_dir, f): """ @@ -63,7 +63,7 @@ def bucket_object_converter(filepath): relative_path, key=relative_path, acl='public-read', - bucket=content_bucket, + bucket=content_bucket.id, content_type=mime_type, source=FileAsset(filepath), opts=ResourceOptions(parent=content_bucket) @@ -91,12 +91,12 @@ def bucket_object_converter(filepath): # Create a validation record to prove that we own the domain. cert_validation_domain = pulumi_aws.route53.Record(f'{target_domain}-validation', name=certificate.domain_validation_options.apply( - lambda o: o[0]['resourceRecordName']), + lambda o: o[0].resource_record_name), zone_id=hzid, type=certificate.domain_validation_options.apply( - lambda o: o[0]['resourceRecordType']), + lambda o: o[0].resource_record_type), records=[certificate.domain_validation_options.apply( - lambda o: o[0]['resourceRecordValue'])], + lambda o: o[0].resource_record_value)], ttl=TEN_MINUTES) # Create a special resource to await complete validation of the cert. 
@@ -117,53 +117,53 @@ def bucket_object_converter(filepath): aliases=[ target_domain ], - origins=[{ - 'originId': content_bucket.arn, - 'domain_name': content_bucket.website_endpoint, - 'customOriginConfig': { - 'originProtocolPolicy': 'http-only', - 'httpPort': 80, - 'httpsPort': 443, - 'originSslProtocols': ['TLSv1.2'], - } - }], + origins=[pulumi_aws.cloudfront.DistributionOriginArgs( + origin_id=content_bucket.arn, + domain_name=content_bucket.website_endpoint, + custom_origin_config=pulumi_aws.cloudfront.DistributionOriginCustomOriginConfigArgs( + origin_protocol_policy='http-only', + http_port=80, + https_port=443, + origin_ssl_protocols=['TLSv1.2'], + ) + )], default_root_object='index.html', - default_cache_behavior={ - 'targetOriginId': content_bucket.arn, - 'viewerProtocolPolicy': 'redirect-to-https', - 'allowedMethods': ['GET', 'HEAD', 'OPTIONS'], - 'cachedMethods': ['GET', 'HEAD', 'OPTIONS'], - 'forwardedValues': { - 'cookies': { 'forward': 'none' }, - 'queryString': False, - }, - 'minTtl': 0, - 'defaultTtl': TEN_MINUTES, - 'maxTtl': TEN_MINUTES, - }, + default_cache_behavior=pulumi_aws.cloudfront.DistributionDefaultCacheBehaviorArgs( + target_origin_id=content_bucket.arn, + viewer_protocol_policy='redirect-to-https', + allowed_methods=['GET', 'HEAD', 'OPTIONS'], + cached_methods=['GET', 'HEAD', 'OPTIONS'], + forwarded_values=pulumi_aws.cloudfront.DistributionDefaultCacheBehaviorForwardedValuesArgs( + cookies=pulumi_aws.cloudfront.DistributionDefaultCacheBehaviorForwardedValuesCookiesArgs(forward='none'), + query_string=False, + ), + min_ttl=0, + default_ttl=TEN_MINUTES, + max_ttl=TEN_MINUTES, + ), # PriceClass_100 is the lowest cost tier (US/EU only). 
price_class= 'PriceClass_100', - custom_error_responses=[{ - 'errorCode': 404, - 'responseCode': 404, - 'responsePagePath': '/404.html' - }], + custom_error_responses=[pulumi_aws.cloudfront.DistributionCustomErrorResponseArgs( + error_code=404, + response_code=404, + response_page_path='/404.html' + )], # Use the certificate we generated for this distribution. - viewer_certificate={ - 'acmCertificateArn': certificate_arn, - 'sslSupportMethod': 'sni-only', - }, - restrictions={ - 'geoRestriction': { - 'restrictionType': 'none' - } - }, + viewer_certificate=pulumi_aws.cloudfront.DistributionViewerCertificateArgs( + acm_certificate_arn=certificate_arn, + ssl_support_method='sni-only', + ), + restrictions=pulumi_aws.cloudfront.DistributionRestrictionsArgs( + geo_restriction=pulumi_aws.cloudfront.DistributionRestrictionsGeoRestrictionArgs( + restriction_type='none' + ) + ), # Put access logs in the log bucket we created earlier. - logging_config={ - 'bucket': logs_bucket.bucket_domain_name, - 'includeCookies': False, - 'prefix': f'${target_domain}/', - }, + logging_config=pulumi_aws.cloudfront.DistributionLoggingConfigArgs( + bucket=logs_bucket.bucket_domain_name, + include_cookies=False, + prefix=f'{target_domain}/', + ), # CloudFront typically takes 15 minutes to fully deploy a new distribution. # Skip waiting for that to complete. 
wait_for_deployment=False) @@ -179,11 +179,11 @@ def create_alias_record(target_domain, distribution): zone_id=hzid, type='A', aliases=[ - { - 'name': distribution.domain_name, - 'zoneId': distribution.hosted_zone_id, - 'evaluateTargetHealth': True - } + pulumi_aws.route53.RecordAliasArgs( + name=distribution.domain_name, + zone_id=distribution.hosted_zone_id, + evaluate_target_health=True, + ) ] ) diff --git a/aws-py-voting-app/__main__.py b/aws-py-voting-app/__main__.py index 7e0d2fe16..918b5eccb 100644 --- a/aws-py-voting-app/__main__.py +++ b/aws-py-voting-app/__main__.py @@ -21,7 +21,7 @@ app_vpc_subnet = aws.ec2.Subnet("app-vpc-subnet", cidr_block="172.31.32.0/20", - vpc_id=app_vpc) + vpc_id=app_vpc.id) # Creating a gateway to the web for the VPC app_gateway = aws.ec2.InternetGateway("app-gateway", @@ -29,34 +29,34 @@ app_routetable = aws.ec2.RouteTable("app-routetable", routes=[ - { - "cidr_block": "0.0.0.0/0", - "gateway_id": app_gateway.id, - } + aws.ec2.RouteTableRouteArgs( + cidr_block="0.0.0.0/0", + gateway_id=app_gateway.id, + ) ], vpc_id=app_vpc.id) # Associating our gateway with our VPC, to allow our app to communicate with the greater internet app_routetable_association = aws.ec2.MainRouteTableAssociation("app_routetable_association", route_table_id=app_routetable.id, - vpc_id=app_vpc) + vpc_id=app_vpc.id) # Creating a Security Group that restricts incoming traffic to HTTP app_security_group = aws.ec2.SecurityGroup("security-group", vpc_id=app_vpc.id, description="Enables HTTP access", - ingress=[{ - 'protocol': 'tcp', - 'from_port': 0, - 'to_port': 65535, - 'cidr_blocks': ['0.0.0.0/0'], - }], - egress=[{ - 'protocol': '-1', - 'from_port': 0, - 'to_port': 0, - 'cidr_blocks': ['0.0.0.0/0'], - }]) + ingress=[aws.ec2.SecurityGroupIngressArgs( + protocol='tcp', + from_port=0, + to_port=65535, + cidr_blocks=['0.0.0.0/0'], + )], + egress=[aws.ec2.SecurityGroupEgressArgs( + protocol='-1', + from_port=0, + to_port=0, + cidr_blocks=['0.0.0.0/0'], + )]) # 
Creating an IAM role used by Fargate to execute all our services app_exec_role = aws.iam.Role("app-exec-role", @@ -130,16 +130,16 @@ port=redis_port, protocol="TCP", target_type="ip", - stickiness= { - "enabled": False, - "type": "lb_cookie", - }, + stickiness=aws.lb.TargetGroupStickinessArgs( + enabled=False, + type="lb_cookie", + ), vpc_id=app_vpc.id) # Creating a load balancer to spread out incoming requests redis_balancer = aws.lb.LoadBalancer("redis-balancer", load_balancer_type="network", - internal= False, + internal=False, security_groups=[], subnets=[app_vpc_subnet.id]) @@ -148,10 +148,10 @@ load_balancer_arn=redis_balancer.arn, port=redis_port, protocol="TCP", - default_actions=[{ - "type": "forward", - "target_group_arn": redis_targetgroup.arn - }]) + default_actions=[aws.lb.ListenerDefaultActionArgs( + type="forward", + target_group_arn=redis_targetgroup.arn + )]) # Creating a task definition for the Redis instance. redis_task_definition = aws.ecs.TaskDefinition("redis-task-definition", @@ -182,20 +182,20 @@ launch_type="FARGATE", task_definition=redis_task_definition.arn, wait_for_steady_state=False, - network_configuration={ - "assign_public_ip": "true", - "subnets": [app_vpc_subnet.id], - "security_groups": [app_security_group.id] - }, - load_balancers=[{ - "target_group_arn": redis_targetgroup.arn, - "container_name": "redis-container", - "container_port": redis_port, - }], + network_configuration=aws.ecs.ServiceNetworkConfigurationArgs( + assign_public_ip=True, + subnets=[app_vpc_subnet.id], + security_groups=[app_security_group.id] + ), + load_balancers=[aws.ecs.ServiceLoadBalancerArgs( + target_group_arn=redis_targetgroup.arn, + container_name="redis-container", + container_port=redis_port, + )], opts=pulumi.ResourceOptions(depends_on=[redis_listener]), ) -# Creating a special endpoint for the Redis backend, which we will provide +# Creating a special endpoint for the Redis backend, which we will provide # to the Flask frontend as an environment 
variable redis_endpoint = {"host": redis_balancer.dns_name, "port": redis_port} @@ -206,10 +206,10 @@ port=80, protocol="TCP", target_type="ip", - stickiness= { - "enabled": False, - "type": "lb_cookie", - }, + stickiness=aws.lb.TargetGroupStickinessArgs( + enabled=False, + type="lb_cookie", + ), vpc_id=app_vpc.id) # Creating a load balancer to spread out incoming requests @@ -224,10 +224,10 @@ load_balancer_arn=flask_balancer.arn, port=80, protocol="TCP", - default_actions=[{ - "type": "forward", - "target_group_arn": flask_targetgroup.arn - }]) + default_actions=[aws.lb.ListenerDefaultActionArgs( + type="forward", + target_group_arn=flask_targetgroup.arn + )]) # Creating a Docker image from "./frontend/Dockerfile", which we will use # to upload our app @@ -281,16 +281,16 @@ def get_registry_info(rid): launch_type="FARGATE", task_definition=flask_task_definition.arn, wait_for_steady_state=False, - network_configuration={ - "assign_public_ip": "true", - "subnets": [app_vpc_subnet.id], - "security_groups": [app_security_group.id] - }, - load_balancers=[{ - "target_group_arn": flask_targetgroup.arn, - "container_name": "flask-container", - "container_port": 80, - }], + network_configuration=aws.ecs.ServiceNetworkConfigurationArgs( + assign_public_ip=True, + subnets=[app_vpc_subnet.id], + security_groups=[app_security_group.id] + ), + load_balancers=[aws.ecs.ServiceLoadBalancerArgs( + target_group_arn=flask_targetgroup.arn, + container_name="flask-container", + container_port=80, + )], opts=pulumi.ResourceOptions(depends_on=[flask_listener]), ) diff --git a/aws-py-voting-app/requirements.txt b/aws-py-voting-app/requirements.txt index d9bbfba32..9b0f784a4 100644 --- a/aws-py-voting-app/requirements.txt +++ b/aws-py-voting-app/requirements.txt @@ -1,3 +1,3 @@ pulumi>=2.0.0,<3.0.0 -pulumi-aws>=2.0.0,<3.0.0 +pulumi-aws>=3.0.0,<4.0.0 pulumi-docker>=2.0.0,<3.0.0 diff --git a/aws-py-webserver/__main__.py b/aws-py-webserver/__main__.py index a4e59a8a2..6ed9f74f1 100644 --- 
a/aws-py-webserver/__main__.py +++ b/aws-py-webserver/__main__.py @@ -5,15 +5,18 @@ size = 't2.micro' -ami = aws.get_ami(most_recent="true", +ami = aws.get_ami(most_recent=True, owners=["137112412989"], - filters=[{"name":"name","values":["amzn-ami-hvm-*"]}]) + filters=[aws.GetAmiFilterArgs(name="name", values=["amzn-ami-hvm-*"])]) group = aws.ec2.SecurityGroup('web-secgrp', description='Enable HTTP access', - ingress=[ - { 'protocol': 'tcp', 'from_port': 80, 'to_port': 80, 'cidr_blocks': ['0.0.0.0/0'] } - ]) + ingress=[aws.ec2.SecurityGroupIngressArgs( + protocol='tcp', + from_port=80, + to_port=80, + cidr_blocks=['0.0.0.0/0'], + )]) user_data = """ #!/bin/bash diff --git a/azure-py-aks-multicluster/__main__.py b/azure-py-aks-multicluster/__main__.py index a08c50d5b..c0fae601d 100644 --- a/azure-py-aks-multicluster/__main__.py +++ b/azure-py-aks-multicluster/__main__.py @@ -7,7 +7,7 @@ password = config.get_secret("password") or random.RandomPassword( "pwd", length=20, - special="true").result + special=True).result ssh_public_key = config.require("sshPublicKey") resource_group=core.ResourceGroup("aksresourcegroup") @@ -24,31 +24,32 @@ value=password, end_date="2099-01-01T00:00:00Z") -aks_cluster_config = [] -aks_cluster_config.append({"name": "east", "location": "eastus", "node_count": "2", "node_size": "Standard_D2_v2"}) -aks_cluster_config.append({"name": "west", "location": "westus", "node_count": "2", "node_size": "Standard_D2_v2"}) +aks_cluster_config = [ + {"name": "east", "location": "eastus", "node_count": 2, "node_size": "Standard_D2_v2"}, + {"name": "west", "location": "westus", "node_count": 2, "node_size": "Standard_D2_v2"}, +] cluster_names = [] for config in aks_cluster_config: cluster = containerservice.KubernetesCluster( "aksCluster-%s" % config["name"], resource_group_name=resource_group.name, - linux_profile={ - "admin_username": "aksuser", - "ssh_key": { - "key_data": ssh_public_key, - }, - }, - service_principal={ - "client_id": 
ad_app.application_id, - "client_secret": ad_sp_password.value - }, + linux_profile=containerservice.KubernetesClusterLinuxProfileArgs( + admin_username="aksuser", + ssh_key=containerservice.KubernetesClusterLinuxProfileSshKeyArgs( + key_data=ssh_public_key, + ), + ), + service_principal=containerservice.KubernetesClusterServicePrincipalArgs( + client_id=ad_app.application_id, + client_secret=ad_sp_password.value + ), location=config["location"], - default_node_pool={ - "name": "aksagentpool", - "node_count": config["node_count"], - "vm_size": config["node_size"], - }, + default_node_pool=containerservice.KubernetesClusterDefaultNodePoolArgs( + name="aksagentpool", + node_count=config["node_count"], + vm_size=config["node_size"], + ), dns_prefix="sample-kube", ) cluster_names.append(cluster.name) diff --git a/azure-py-aks/__main__.py b/azure-py-aks/__main__.py index fec2d3636..33914772f 100644 --- a/azure-py-aks/__main__.py +++ b/azure-py-aks/__main__.py @@ -1,10 +1,24 @@ import pulumi from pulumi import ResourceOptions from pulumi_kubernetes import Provider -from pulumi_kubernetes.apps.v1 import Deployment -from pulumi_kubernetes.core.v1 import Service +from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs +from pulumi_kubernetes.core.v1 import ( + ContainerArgs, + PodSpecArgs, + PodTemplateSpecArgs, + Service, + ServicePortArgs, + ServiceSpecArgs, +) +from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs from pulumi_azure.core import ResourceGroup -from pulumi_azure.containerservice import KubernetesCluster +from pulumi_azure.containerservice import ( + KubernetesCluster, + KubernetesClusterDefaultNodePoolArgs, + KubernetesClusterLinuxProfileArgs, + KubernetesClusterLinuxProfileSshKeyArgs, + KubernetesClusterServicePrincipalArgs, +) from pulumi_azuread import Application, ServicePrincipal, ServicePrincipalPassword # read and set config values @@ -38,13 +52,21 @@ resource_group_name=resource_group.name, kubernetes_version="1.18.6", 
dns_prefix="dns", - linux_profile={"adminUsername": "aksuser", "ssh_key": {"keyData": SSHKEY}}, - service_principal={"client_id": app.application_id, "client_secret": sppwd.value}, - default_node_pool={ - "name": "type1", - "node_count": 2, - "vm_size": "Standard_B2ms", - }, + linux_profile=KubernetesClusterLinuxProfileArgs( + admin_username="aksuser", + ssh_key=KubernetesClusterLinuxProfileSshKeyArgs( + key_data=SSHKEY + ) + ), + service_principal=KubernetesClusterServicePrincipalArgs( + client_id=app.application_id, + client_secret=sppwd.value + ), + default_node_pool=KubernetesClusterDefaultNodePoolArgs( + name="type1", + node_count=2, + vm_size="Standard_B2ms", + ), ) k8s_provider = Provider( @@ -54,21 +76,21 @@ labels = {"app": "nginx"} nginx = Deployment( "k8s-nginx", - spec={ - "selector": {"matchLabels": labels}, - "replicas": 1, - "template": { - "metadata": {"labels": labels}, - "spec": {"containers": [{"name": "nginx", "image": "nginx"}]}, - }, - }, - __opts__=ResourceOptions(parent=k8s_provider, provider=k8s_provider), + spec=DeploymentSpecArgs( + selector=LabelSelectorArgs(match_labels=labels), + replicas=1, + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs(labels=labels), + spec=PodSpecArgs(containers=[ContainerArgs(name="nginx", image="nginx")]), + ), + ), + opts=ResourceOptions(parent=k8s_provider, provider=k8s_provider), ) ingress = Service( "k8s-nginx", - spec={"type": "LoadBalancer", "selector": labels, "ports": [{"port": 80}]}, - __opts__=ResourceOptions(parent=k8s_provider, provider=k8s_provider), + spec=ServiceSpecArgs(type="LoadBalancer", selector=labels, ports=[ServicePortArgs(port=80)]), + opts=ResourceOptions(parent=k8s_provider, provider=k8s_provider), ) pulumi.export("kubeconfig", aks.kube_config_raw) diff --git a/azure-py-appservice-docker/Pulumi.yaml b/azure-py-appservice-docker/Pulumi.yaml index 52aea96f9..7208b8294 100644 --- a/azure-py-appservice-docker/Pulumi.yaml +++ b/azure-py-appservice-docker/Pulumi.yaml @@ -6,3 +6,6 @@ 
template: azure:environment: description: The Azure environment to use (`public`, `usgovernment`, `german`, `china`) default: public + azure:location: + description: The Azure location to use + default: WestUS diff --git a/azure-py-appservice-docker/README.md b/azure-py-appservice-docker/README.md index 18643a95d..a4f130227 100644 --- a/azure-py-appservice-docker/README.md +++ b/azure-py-appservice-docker/README.md @@ -30,6 +30,12 @@ The example deploys an existing image from Docker Hub $ pip3 install -r requirements.txt ``` +1. Specify the Azure location to use: + + ```bash + $ pulumi config set azure:location WestUS + ``` + 1. Run `pulumi up` to preview and deploy changes: ```bash @@ -53,4 +59,3 @@ The example deploys an existing image from Docker Hub $ curl "$(pulumi stack output hello_endpoint)" Hello, world! ``` - diff --git a/azure-py-appservice-docker/__main__.py b/azure-py-appservice-docker/__main__.py index c63b06baf..5df46835f 100644 --- a/azure-py-appservice-docker/__main__.py +++ b/azure-py-appservice-docker/__main__.py @@ -1,5 +1,5 @@ from pulumi_azure import core, appservice, containerservice -from pulumi import export, Output +from pulumi import export resource_group = core.ResourceGroup("samples") @@ -7,11 +7,11 @@ "linux-apps", resource_group_name=resource_group.name, kind="Linux", - reserved="true", - sku={ - "tier": "Basic", - "size": "B1", - }) + reserved=True, + sku=appservice.PlanSkuArgs( + tier="Basic", + size="B1", + )) docker_image = "microsoft/azure-appservices-go-quickstart" @@ -22,11 +22,11 @@ app_settings={ "WEBSITES_ENABLE_APP_SERVICE_STORAGE": "false", }, - site_config={ - "always_on": "true", - "linux_fx_version": "DOCKER|%s" % docker_image, - }, - https_only="true") + site_config=appservice.AppServiceSiteConfigArgs( + always_on=True, + linux_fx_version="DOCKER|%s" % docker_image, + ), + https_only=True) export("hello_endpoint", hello_app.default_site_hostname.apply( lambda endpoint: "https://" + endpoint + "/hello" diff --git 
a/azure-py-appservice/Pulumi.yaml b/azure-py-appservice/Pulumi.yaml index f8d8f8e27..c0dc44df5 100644 --- a/azure-py-appservice/Pulumi.yaml +++ b/azure-py-appservice/Pulumi.yaml @@ -6,6 +6,9 @@ template: azure:environment: description: The Azure environment to use (`public`, `usgovernment`, `german`, `china`) default: public + azure:location: + description: The Azure location to use + default: WestUS sqlPassword: description: SQL Server password (complex enough to satisfy Azure policy) secret: true diff --git a/azure-py-appservice/README.md b/azure-py-appservice/README.md index fd41c8c67..ac9656f98 100644 --- a/azure-py-appservice/README.md +++ b/azure-py-appservice/README.md @@ -31,10 +31,16 @@ with App Service. $ pip3 install -r requirements.txt ``` +1. Specify the Azure location to use: + + ```bash + $ pulumi config set azure:location WestUS + ``` + 1. Define SQL Server password (make it complex enough to satisfy Azure policy): ```bash - pulumi config set --secret sqlPassword + $ pulumi config set --secret sqlPassword ``` 1. 
Run `pulumi up` to preview and deploy changes: diff --git a/azure-py-appservice/__main__.py b/azure-py-appservice/__main__.py index d27addc73..fb7136568 100644 --- a/azure-py-appservice/__main__.py +++ b/azure-py-appservice/__main__.py @@ -19,10 +19,10 @@ "appservice-asp", resource_group_name=resource_group.name, kind="App", - sku={ - "tier": "Basic", - "size": "B1", - }) + sku=appservice.PlanSkuArgs( + tier="Basic", + size="B1", + )) storage_container = storage.Container( "appservice-c", @@ -42,14 +42,14 @@ def get_sas(args): start="2020-01-01", expiry="2030-01-01", container_name=args[2], - permissions={ - "read": "true", - "write": "false", - "delete": "false", - "list": "false", - "add": "false", - "create": "false" - } + permissions=storage.GetAccountBlobContainerSASPermissionsArgs( + read=True, + write=False, + delete=False, + list=False, + add=False, + create=False, + ) ) return f"https://{args[0]}.blob.core.windows.net/{args[2]}/{args[3]}{blob_sas.sas}" @@ -94,11 +94,11 @@ def get_sas(args): "ApplicationInsightsAgent_EXTENSION_VERSION": "~2", "WEBSITE_RUN_FROM_PACKAGE": signed_blob_url, }, - connection_strings=[{ - "name": "db", - "type": "SQLAzure", - "value": connection_string - }] + connection_strings=[appservice.AppServiceConnectionStringArgs( + name="db", + type="SQLAzure", + value=connection_string, + )] ) export("endpoint", app.default_site_hostname.apply( diff --git a/azure-py-functions-raw/__main__.py b/azure-py-functions-raw/__main__.py index 46da08632..b2b59bf75 100644 --- a/azure-py-functions-raw/__main__.py +++ b/azure-py-functions-raw/__main__.py @@ -32,14 +32,14 @@ def get_sas(args): start="2020-01-01", expiry="2030-01-01", container_name=args[2], - permissions={ - "read": "true", - "write": "false", - "delete": "false", - "list": "false", - "add": "false", - "create": "false" - } + permissions=storage.GetAccountBlobContainerSASPermissionsArgs( + read=True, + write=False, + delete=False, + list=False, + add=False, + create=False, + ) ) return 
f"https://{args[0]}.blob.core.windows.net/{args[2]}/{args[3]}{blob_sas.sas}" @@ -53,11 +53,11 @@ def get_sas(args): "http", resource_group_name=resource_group.name, kind="Linux", - sku={ - "tier": "Dynamic", - "size": "Y1" - }, - reserved="true" + sku=appservice.PlanSkuArgs( + tier="Dynamic", + size="Y1" + ), + reserved=True, ) http_function_app=appservice.FunctionApp( diff --git a/azure-py-hdinsight-spark/Pulumi.yaml b/azure-py-hdinsight-spark/Pulumi.yaml index 924e4cfed..6fe1416a7 100644 --- a/azure-py-hdinsight-spark/Pulumi.yaml +++ b/azure-py-hdinsight-spark/Pulumi.yaml @@ -6,6 +6,9 @@ template: azure:environment: description: The Azure environment to use (`public`, `usgovernment`, `german`, `china`) default: public + azure:location: + description: The Azure location to use + default: WestUS username: description: Spark username secret: true diff --git a/azure-py-hdinsight-spark/README.md b/azure-py-hdinsight-spark/README.md index a43eeb34b..94874a491 100644 --- a/azure-py-hdinsight-spark/README.md +++ b/azure-py-hdinsight-spark/README.md @@ -28,6 +28,19 @@ An example Pulumi component that deploys a Spark cluster on Azure HDInsight. $ pip3 install -r requirements.txt ``` +1. Specify the Azure location to use: + + ```bash + $ pulumi config set azure:location WestUS + ``` + +1. Define Spark username and password (make it complex enough to satisfy Azure policy): + + ```bash + $ pulumi config set username + $ pulumi config set --secret password + ``` + 1. 
Run `pulumi up` to preview and deploy changes: ``` bash diff --git a/azure-py-hdinsight-spark/__main__.py b/azure-py-hdinsight-spark/__main__.py index e5ca036d9..dc90275a1 100644 --- a/azure-py-hdinsight-spark/__main__.py +++ b/azure-py-hdinsight-spark/__main__.py @@ -22,38 +22,37 @@ "myspark", resource_group_name=resource_group.name, cluster_version="3.6", - component_version={ - "spark": "2.3" - }, + component_version=hdinsight.SparkClusterComponentVersionArgs( + spark="2.3" + ), tier="Standard", - storage_accounts=[{ - "is_default": "true", - "storage_account_key": storage_account.primary_access_key, - "storage_container_id": storage_container.id - }], - gateway={ - "enabled": "true", - "username": username, - "password": password - }, - roles={ - "head_node": { - "vm_size": "Standard_D12_v2", - "username": username, - "password": password - }, - "worker_node": { - "vm_size": "Standard_D12_v2", - "username": username, - "password": password, - "target_instance_count": "3", - }, - "zookeeper_node": { - "vm_size": "Standard_D12_v2", - "username": username, - "password": password - } - } + storage_accounts=[hdinsight.SparkClusterStorageAccountArgs( + is_default=True, + storage_account_key=storage_account.primary_access_key, + storage_container_id=storage_container.id + )], + gateway=hdinsight.SparkClusterGatewayArgs( + username=username, + password=password + ), + roles=hdinsight.SparkClusterRolesArgs( + head_node=hdinsight.SparkClusterRolesHeadNodeArgs( + vm_size="Standard_D12_v2", + username=username, + password=password + ), + worker_node=hdinsight.SparkClusterRolesWorkerNodeArgs( + vm_size="Standard_D12_v2", + username=username, + password=password, + target_instance_count=3, + ), + zookeeper_node=hdinsight.SparkClusterRolesZookeeperNodeArgs( + vm_size="Standard_D12_v2", + username=username, + password=password, + ), + ), ) export("endpoint", spark_cluster.https_endpoint.apply( diff --git a/azure-py-msi-keyvault-rbac/__main__.py 
b/azure-py-msi-keyvault-rbac/__main__.py index e21478e32..fa97ec289 100644 --- a/azure-py-msi-keyvault-rbac/__main__.py +++ b/azure-py-msi-keyvault-rbac/__main__.py @@ -29,7 +29,7 @@ def createFirewallRules(arg): administrator_login_password = random.RandomPassword( "password", length=16, - special="true", + special=True, ).result sql_server = sql.SqlServer( @@ -61,10 +61,10 @@ def createFirewallRules(arg): "asp", resource_group_name=resource_group.name, kind="App", - sku={ - "tier": "Basic", - "size": "B1" - } + sku=appservice.PlanSkuArgs( + tier="Basic", + size="B1" + ) ) blob = storage.Blob( @@ -84,11 +84,11 @@ def createFirewallRules(arg): resource_group_name=resource_group.name, sku_name="standard", tenant_id=tenant_id, - access_policies=[{ - "tenant_id": tenant_id, - "object_id": current_principal, - "secret_permissions": ["delete", "get", "list", "set"] - }] + access_policies=[keyvault.KeyVaultAccessPolicyArgs( + tenant_id=tenant_id, + object_id=current_principal, + secret_permissions=["delete", "get", "list", "set"] + )] ) def get_sas(args): @@ -97,14 +97,14 @@ def get_sas(args): start="2020-01-01", expiry="2030-01-01", container_name=args[2], - permissions={ - "read": "true", - "write": "false", - "delete": "false", - "list": "false", - "add": "false", - "create": "false" - } + permissions=storage.GetAccountBlobContainerSASPermissionsArgs( + read=True, + write=False, + delete=False, + list=False, + add=False, + create=False, + ) ) return f"https://{args[0]}.blob.core.windows.net/{args[2]}/{args[3]}{blob_sas.sas}" @@ -127,22 +127,22 @@ def get_sas(args): "app", resource_group_name=resource_group.name, app_service_plan_id=app_service_plan.id, - identity={ - "type": "SystemAssigned", - }, + identity=appservice.AppServiceIdentityArgs( + type="SystemAssigned", + ), app_settings={ "WEBSITE_RUN_FROM_ZIP": secret_uri.apply(lambda args: "@Microsoft.KeyVault(SecretUri=" + args + ")"), - "StorageBlobUrl": text_blob.url + "StorageBlobUrl": text_blob.url, }, - 
connection_strings=[{ - "name": "db", - "value": connection_string, - "type": "SQLAzure" - }] + connection_strings=[appservice.AppServiceConnectionStringArgs( + name="db", + value=connection_string, + type="SQLAzure", + )] ) ## Work around a preview issue https://github.com/pulumi/pulumi-azure/issues/192 -principal_id = app.identity["principal_id"] or "11111111-1111-1111-1111-111111111111" +principal_id = app.identity.apply(lambda id: id.principal_id or "11111111-1111-1111-1111-111111111111") policy = keyvault.AccessPolicy( "app-policy", diff --git a/azure-py-virtual-data-center/vdc.py b/azure-py-virtual-data-center/vdc.py index 43115d9f7..d17a1f8f8 100644 --- a/azure-py-virtual-data-center/vdc.py +++ b/azure-py-virtual-data-center/vdc.py @@ -19,11 +19,11 @@ def bastion_host(stem, subnet_id, depends_on=None): ab = compute.BastionHost( f'{stem}-ab-', resource_group_name = resource_group_name, - ip_configuration = { - 'name': f'{stem}-ab-ipconf', - 'publicIpAddressId': ab_pip.id, - 'subnet_id': subnet_id, - }, + ip_configuration = compute.BastionHostIpConfigurationArgs( + name = f'{stem}-ab-ipconf', + public_ip_address_id = ab_pip.id, + subnet_id = subnet_id, + ), tags = tags, opts = ResourceOptions(parent=self, depends_on=depends_on), ) @@ -43,11 +43,11 @@ def expressroute_gateway(stem, subnet_id, depends_on=None): sku = 'Standard', type = 'ExpressRoute', vpn_type = 'RouteBased', - ip_configurations = [{ - 'name': f'{stem}-er-gw-ipconf', - 'publicIpAddressId': er_gw_pip.id, - 'subnet_id': subnet_id, - }], + ip_configurations = [network.VirtualNetworkGatewayIpConfigurationArgs( + name = f'{stem}-er-gw-ipconf', + public_ip_address_id = er_gw_pip.id, + subnet_id = subnet_id, + )], tags = tags, opts = ResourceOptions( parent=self, @@ -82,11 +82,11 @@ def firewall(stem, fw_sn_id, fwm_sn_id, depends_on=None): f'{stem}-fw-', resource_group_name = resource_group_name, # sku = 'AZFW_VNet', # not required but distinguishes from 'AZFW_Hub' - ip_configurations = [{ - 'name': 
f'{stem}-fw-ipconf', - 'publicIpAddressId': fw_pip.id, - 'subnet_id': fw_sn_id, - }], + ip_configurations = [network.FirewallIpConfigurationArgs( + name = f'{stem}-fw-ipconf', + public_ip_address_id = fw_pip.id, + subnet_id = fw_sn_id, + )], # management_ip_configuration = { # requires api 2019-11-01 or later # 'name': f'{stem}-fwm-ipconf', # 'publicIpAddressId': fwm_pip.id, @@ -248,11 +248,11 @@ def vpn_gateway(stem, subnet_id, depends_on=None): sku = 'VpnGw1', type = 'Vpn', vpn_type = 'RouteBased', - ip_configurations = [{ - 'name': f'{stem}-vpn-gw-ipconf', - 'publicIpAddressId': vpn_gw_pip.id, - 'subnet_id': subnet_id, - }], + ip_configurations = [network.VirtualNetworkGatewayIpConfigurationArgs( + name=f'{stem}-vpn-gw-ipconf', + public_ip_address_id=vpn_gw_pip.id, + subnet_id=subnet_id, + )], tags = tags, opts = ResourceOptions( parent=self, diff --git a/azure-py-vm-scaleset/Pulumi.yaml b/azure-py-vm-scaleset/Pulumi.yaml index f077831d8..53c2194fd 100644 --- a/azure-py-vm-scaleset/Pulumi.yaml +++ b/azure-py-vm-scaleset/Pulumi.yaml @@ -6,3 +6,6 @@ template: azure:environment: description: The Azure environment to use (`public`, `usgovernment`, `german`, `china`) default: public + azure:location: + description: The Azure location to use + default: WestUS diff --git a/azure-py-vm-scaleset/README.md b/azure-py-vm-scaleset/README.md index 5347389f5..337122ee0 100644 --- a/azure-py-vm-scaleset/README.md +++ b/azure-py-vm-scaleset/README.md @@ -6,9 +6,9 @@ This example provisions a Scale Set of Linux web servers with nginx deployed, co ## Prerequisites -- [Node.js](https://nodejs.org/en/download/) -- [Download and install the Pulumi CLI](https://www.pulumi.com/docs/get-started/install/) -- [Connect Pulumi with your Azure account](https://www.pulumi.com/docs/intro/cloud-providers/azure/setup/) (if your `az` CLI is configured, no further changes are required) +1. [Install Pulumi](https://www.pulumi.com/docs/get-started/install/) +1. 
[Configure Pulumi for Azure](https://www.pulumi.com/docs/intro/cloud-providers/azure/setup/) +1. [Configure Pulumi for Python](https://www.pulumi.com/docs/intro/languages/python/) ## Running the App diff --git a/azure-py-vm-scaleset/__main__.py b/azure-py-vm-scaleset/__main__.py index 83c45d579..d36653032 100644 --- a/azure-py-vm-scaleset/__main__.py +++ b/azure-py-vm-scaleset/__main__.py @@ -7,14 +7,14 @@ admin_password = config.get_secret("adminPassword") or random.RandomPassword( "pwd", length=20, - special="true").result + special=True).result domain = config.get("domain") or random.RandomString( "domain", length=10, - number="false", - special="false", - upper="false").result -application_port = config.get_float("applicationPort") or 80; + number=False, + special=False, + upper=False).result +application_port = config.get_float("applicationPort") or 80 resource_group = core.ResourceGroup("vmss-rg") @@ -27,10 +27,10 @@ load_balancer = lb.LoadBalancer( "lb", resource_group_name=resource_group.name, - frontend_ip_configurations=[{ - "name": "PublicIPAddress", - "publicIpAddressId": public_ip.id, - }]) + frontend_ip_configurations=[lb.LoadBalancerFrontendIpConfigurationArgs( + name="PublicIPAddress", + public_ip_address_id=public_ip.id, + )]) bpepool = lb.BackendAddressPool( "bpepool", @@ -62,59 +62,59 @@ subnet = network.Subnet( "subnet", resource_group_name=resource_group.name, - address_prefix="10.0.2.0/24", + address_prefixes=["10.0.2.0/24"], virtual_network_name=vnet.name, - enforce_private_link_endpoint_network_policies="false") + enforce_private_link_endpoint_network_policies=False) scale_set = compute.ScaleSet( "vmscaleset", resource_group_name=resource_group.name, - network_profiles=[{ - "ipConfigurations": [{ - "load_balancer_backend_address_pool_ids": [bpepool.id], - "name": "IPConfiguration", - "primary": "true", - "subnet_id": subnet.id, - }], - "name": "networkprofile", - "primary": "true", - }], - os_profile={ - "admin_username": admin_user, - 
"admin_password": admin_password, - "computer_name_prefix": "vmlab", - "custom_data": """ + network_profiles=[compute.ScaleSetNetworkProfileArgs( + ip_configurations=[compute.ScaleSetNetworkProfileIpConfigurationArgs( + load_balancer_backend_address_pool_ids=[bpepool.id], + name="IPConfiguration", + primary=True, + subnet_id=subnet.id, + )], + name="networkprofile", + primary=True, + )], + os_profile=compute.ScaleSetOsProfileArgs( + admin_username=admin_user, + admin_password=admin_password, + computer_name_prefix="vmlab", + custom_data=""" #cloud-config packages: - nginx - """ - }, - os_profile_linux_config={ - "disable_password_authentication": "false" - }, - sku={ - "capacity": 1, - "name": "Standard_DS1_v2", - "tier": "Standard" - }, - storage_profile_data_disks=[{ - "caching": "ReadWrite", - "create_option": "Empty", - "disk_size_gb": 10, - "lun": 0 - }], - storage_profile_image_reference={ - "offer": "UbuntuServer", - "publisher": "Canonical", - "sku": "16.04-LTS", - "version": "latest" - }, - storage_profile_os_disk={ - "caching": "ReadWrite", - "create_option": "FromImage", - "managed_disk_type": "Standard_LRS", - "name": "" - }, + """, + ), + os_profile_linux_config=compute.ScaleSetOsProfileLinuxConfigArgs( + disable_password_authentication=False, + ), + sku=compute.ScaleSetSkuArgs( + capacity=1, + name="Standard_DS1_v2", + tier="Standard", + ), + storage_profile_data_disks=[compute.ScaleSetStorageProfileDataDiskArgs( + caching="ReadWrite", + create_option="Empty", + disk_size_gb=10, + lun=0, + )], + storage_profile_image_reference=compute.ScaleSetStorageProfileImageReferenceArgs( + offer="UbuntuServer", + publisher="Canonical", + sku="16.04-LTS", + version="latest", + ), + storage_profile_os_disk=compute.ScaleSetStorageProfileOsDiskArgs( + caching="ReadWrite", + create_option="FromImage", + managed_disk_type="Standard_LRS", + name="", + ), upgrade_policy_mode="Manual", __opts__=ResourceOptions(depends_on=[bpepool])) diff --git 
a/azure-py-webserver-component/README.md b/azure-py-webserver-component/README.md index aa0de1e74..c3b073683 100644 --- a/azure-py-webserver-component/README.md +++ b/azure-py-webserver-component/README.md @@ -88,8 +88,6 @@ the virtual machine that we are going to create. + 8 created Duration: 2m9s - - Permalink: https://app.pulumi.com/clstokes/azure-py-webserver-component/dev/updates/3 ``` 1. Get the IP address of the newly-created instance from the stack's outputs: @@ -148,7 +146,6 @@ the virtual machine that we are going to create. Duration: 4m28s - Permalink: https://app.pulumi.com/clstokes/azure-py-webserver-component/dev/updates/4 The resources in the stack have been deleted, but the history and configuration associated with the stack are still maintained. If you want to remove the stack completely, run 'pulumi stack rm dev'. ``` diff --git a/azure-py-webserver-component/__main__.py b/azure-py-webserver-component/__main__.py index f4a19c903..5d1793539 100644 --- a/azure-py-webserver-component/__main__.py +++ b/azure-py-webserver-component/__main__.py @@ -14,7 +14,7 @@ resource_group_name=resource_group.name, location=resource_group.location, address_spaces=["10.0.0.0/16"], - subnets=[{"name": "default", "address_prefix": "10.0.1.0/24"}], + subnets=[network.VirtualNetworkSubnetArgs(name="default", address_prefix="10.0.1.0/24")], opts=ResourceOptions(parent=resource_group), ) @@ -22,7 +22,7 @@ "server-subnet", resource_group_name=resource_group.name, virtual_network_name=net.name, - address_prefix="10.0.2.0/24", + address_prefixes=["10.0.2.0/24"], opts=ResourceOptions(parent=net), ) diff --git a/azure-py-webserver-component/webserver.py b/azure-py-webserver-component/webserver.py index b0e9fda50..302229a88 100644 --- a/azure-py-webserver-component/webserver.py +++ b/azure-py-webserver-component/webserver.py @@ -1,4 +1,4 @@ -from pulumi import Config, export, asset, Output, ComponentResource, ResourceOptions +from pulumi import asset, Input, Output, 
ComponentResource, ResourceOptions from pulumi_azure import core, compute, network @@ -7,8 +7,8 @@ def __init__( self, resource_group: core.ResourceGroup, subnet: network.Subnet, - username: str, - password: str, + username: Input[str], + password: Input[str], ): self.resource_group = resource_group self.subnet = subnet @@ -35,12 +35,12 @@ def __init__(self, name: str, args: WebServerArgs, opts: ResourceOptions = None) resource_group_name=args.resource_group.name, location=args.resource_group.location, ip_configurations=[ - { - "name": "webserveripcfg", - "subnet_id": args.subnet.id, - "private_ip_address_allocation": "Dynamic", - "public_ip_address_id": public_ip.id, - } + network.NetworkInterfaceIpConfigurationArgs( + name="webserveripcfg", + subnet_id=args.subnet.id, + private_ip_address_allocation="Dynamic", + public_ip_address_id=public_ip.id, + ) ], opts=child_opts, ) @@ -57,20 +57,25 @@ def __init__(self, name: str, args: WebServerArgs, opts: ResourceOptions = None) vm_size="Standard_A0", delete_data_disks_on_termination=True, delete_os_disk_on_termination=True, - os_profile={ - "computer_name": "hostname", - "admin_username": args.username, - "admin_password": args.password, - "custom_data": userdata, - }, - os_profile_linux_config={"disable_password_authentication": False}, - storage_os_disk={"create_option": "FromImage", "name": "myosdisk1"}, - storage_image_reference={ - "publisher": "canonical", - "offer": "UbuntuServer", - "sku": "16.04-LTS", - "version": "latest", - }, + os_profile=compute.VirtualMachineOsProfileArgs( + computer_name="hostname", + admin_username=args.username, + admin_password=args.password, + custom_data=userdata, + ), + os_profile_linux_config=compute.VirtualMachineOsProfileLinuxConfigArgs( + disable_password_authentication=False, + ), + storage_os_disk=compute.VirtualMachineStorageOsDiskArgs( + create_option="FromImage", + name="myosdisk1", + ), + storage_image_reference=compute.VirtualMachineStorageImageReferenceArgs( + 
publisher="canonical", + offer="UbuntuServer", + sku="16.04-LTS", + version="latest", + ), opts=child_opts, ) diff --git a/azure-py-webserver/README.md b/azure-py-webserver/README.md index e390d1e77..eda5ed13e 100644 --- a/azure-py-webserver/README.md +++ b/azure-py-webserver/README.md @@ -36,13 +36,13 @@ This example deploys an Azure Virtual Machine and starts an HTTP server on it. the virtual machine that we are going to create. ```bash - $ pulumi config set azure-web:username myusername + $ pulumi config set username myusername ``` The password is a secret, so we can ask Pulumi to encrypt the configuration: ```bash - $ pulumi config set --secret azure-web:password Hunter2hunter2 + $ pulumi config set --secret password Hunter2hunter2 ``` 1. Run `pulumi up` to preview and deploy the changes: @@ -82,25 +82,23 @@ the virtual machine that we are going to create. + 7 created Duration: 2m55s - - Permalink: https://app.pulumi.com/swgillespie/azure-py-webserver/azuredev/updates/3 ``` -1. Get the IP address of the newly-created instance from the stack's outputs: +2. Get the IP address of the newly-created instance from the stack's outputs: ```bash $ pulumi stack output public_ip 137.117.15.111 ``` -1. Check to see that your server is now running: +3. Check to see that your server is now running: ```bash $ curl http://$(pulumi stack output public_ip) Hello, World! ``` -1. Destroy the stack: +4. Destroy the stack: ```bash ▶ pulumi destroy --yes @@ -133,6 +131,4 @@ the virtual machine that we are going to create. 
- 7 deleted Duration: 3m49s - - Permalink: https://app.pulumi.com/swgillespie/azure-py-webserver/azuredev/updates/4 ``` diff --git a/azure-py-webserver/__main__.py b/azure-py-webserver/__main__.py index fae95e546..1af1b81e4 100644 --- a/azure-py-webserver/__main__.py +++ b/azure-py-webserver/__main__.py @@ -2,27 +2,29 @@ from pulumi import Output from pulumi_azure import core, compute, network -config = pulumi.Config("azure-web") +config = pulumi.Config() username = config.require("username") -password = config.require("password") +password = config.require_secret("password") resource_group = core.ResourceGroup("server", location="West US") + net = network.VirtualNetwork( "server-network", resource_group_name=resource_group.name, location=resource_group.location, address_spaces=["10.0.0.0/16"], - subnets=[{ - "name": "default", - "address_prefix": "10.0.1.0/24", - }]) + subnets=[network.VirtualNetworkSubnetArgs( + name="default", + address_prefix="10.0.1.0/24", + )]) subnet = network.Subnet( "server-subnet", resource_group_name=resource_group.name, virtual_network_name=net.name, - address_prefix="10.0.2.0/24", - enforce_private_link_endpoint_network_policies="false") + address_prefixes=["10.0.2.0/24"], + enforce_private_link_endpoint_network_policies=False) + public_ip = network.PublicIp( "server-ip", resource_group_name=resource_group.name, @@ -33,12 +35,12 @@ "server-nic", resource_group_name=resource_group.name, location=resource_group.location, - ip_configurations=[{ - "name": "webserveripcfg", - "subnet_id": subnet.id, - "private_ip_address_allocation": "Dynamic", - "public_ip_address_id": public_ip.id, - }]) + ip_configurations=[network.NetworkInterfaceIpConfigurationArgs( + name="webserveripcfg", + subnet_id=subnet.id, + private_ip_address_allocation="Dynamic", + public_ip_address_id=public_ip.id, + )]) userdata = """#!/bin/bash @@ -53,28 +55,28 @@ vm_size="Standard_A0", delete_data_disks_on_termination=True, delete_os_disk_on_termination=True, - 
os_profile={ - "computer_name": "hostname", - "admin_username": username, - "admin_password": password, - "custom_data": userdata, - }, - os_profile_linux_config={ - "disable_password_authentication": False, - }, - storage_os_disk={ - "create_option": "FromImage", - "name": "myosdisk1", - }, - storage_image_reference={ - "publisher": "canonical", - "offer": "UbuntuServer", - "sku": "16.04-LTS", - "version": "latest", - }) + os_profile=compute.VirtualMachineOsProfileArgs( + computer_name="hostname", + admin_username=username, + admin_password=password, + custom_data=userdata, + ), + os_profile_linux_config=compute.VirtualMachineOsProfileLinuxConfigArgs( + disable_password_authentication=False, + ), + storage_os_disk=compute.VirtualMachineStorageOsDiskArgs( + create_option="FromImage", + name="myosdisk1", + ), + storage_image_reference=compute.VirtualMachineStorageImageReferenceArgs( + publisher="canonical", + offer="UbuntuServer", + sku="16.04-LTS", + version="latest", + )) -combined_output = Output.all(vm.id, public_ip.name, - public_ip.resource_group_name) +combined_output = Output.all(vm.id, public_ip.name, public_ip.resource_group_name) public_ip_addr = combined_output.apply( lambda lst: network.get_public_ip(name=lst[1], resource_group_name=lst[2])) + pulumi.export("public_ip", public_ip_addr.ip_address) diff --git a/digitalocean-py-k8s/__main__.py b/digitalocean-py-k8s/__main__.py index c594ff558..9ba130059 100644 --- a/digitalocean-py-k8s/__main__.py +++ b/digitalocean-py-k8s/__main__.py @@ -1,8 +1,9 @@ import pulumi_digitalocean as do -from pulumi import Config, export, Output, ResourceOptions +from pulumi import Config, export, ResourceOptions, CustomTimeouts from pulumi_kubernetes import Provider -from pulumi_kubernetes.apps.v1 import Deployment -from pulumi_kubernetes.core.v1 import Service +from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs +from pulumi_kubernetes.core.v1 import ContainerArgs, PodSpecArgs, PodTemplateSpecArgs, Service, 
ServicePortArgs, ServiceSpecArgs +from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs config = Config() node_count = config.get_float("nodeCount") or 3 @@ -13,35 +14,35 @@ "do-cluster", region="sfo2", version="latest", - node_pool={ - "name": "default", - "size": "s-2vcpu-2gb", - "node_count": node_count - }) + node_pool=do.KubernetesClusterNodePoolArgs( + name="default", + size="s-2vcpu-2gb", + node_count=node_count + )) -k8s_provider = Provider("do-k8s", kubeconfig=cluster.kube_configs[0]["rawConfig"] ) +k8s_provider = Provider("do-k8s", kubeconfig=cluster.kube_configs.apply(lambda c: c[0].raw_config)) app_labels = { "app": "app-nginx" } app = Deployment( "do-app-dep", - spec={ - 'selector': { 'matchLabels': app_labels }, - 'replicas': 1, - 'template': { - 'metadata': { 'labels': app_labels }, - 'spec': { 'containers': [{ 'name': 'nginx', 'image': 'nginx' }] }, - }, - }, __opts__=ResourceOptions(provider=k8s_provider)) + spec=DeploymentSpecArgs( + selector=LabelSelectorArgs(match_labels=app_labels), + replicas=1, + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs(labels=app_labels), + spec=PodSpecArgs(containers=[ContainerArgs(name='nginx', image='nginx')]), + ), + ), __opts__=ResourceOptions(provider=k8s_provider)) ingress = Service( 'do-app-svc', - spec={ - 'type': 'LoadBalancer', - 'selector': app_labels, - 'ports': [{'port': 80}], - }, __opts__=ResourceOptions(provider=k8s_provider, custom_timeouts={"create":"15m", "delete": "15m"})) + spec=ServiceSpecArgs( + type='LoadBalancer', + selector=app_labels, + ports=[ServicePortArgs(port=80)], + ), __opts__=ResourceOptions(provider=k8s_provider, custom_timeouts=CustomTimeouts(create="15m", delete="15m"))) -ingress_ip=ingress.status['load_balancer']['ingress'][0]['ip'] +ingress_ip = ingress.status.apply(lambda s: s.load_balancer.ingress[0].ip) export('ingress_ip', ingress_ip) diff --git a/digitalocean-py-loadbalanced-droplets/__main__.py b/digitalocean-py-loadbalanced-droplets/__main__.py 
index 99754e3d2..c029dc93f 100644 --- a/digitalocean-py-loadbalanced-droplets/__main__.py +++ b/digitalocean-py-loadbalanced-droplets/__main__.py @@ -27,16 +27,16 @@ loadbalancer = do.LoadBalancer( "public", droplet_tag=droplet_type_tag.name, - forwarding_rules=[{ - "entry_port": 80, - "entry_protocol": "http", - "target_port": 80, - "target_protocol": "http", - }], - healthcheck={ - "port": 80, - "protocol": "tcp", - }, + forwarding_rules=[do.LoadBalancerForwardingRuleArgs( + entry_port=80, + entry_protocol="http", + target_port=80, + target_protocol="http", + )], + healthcheck=do.LoadBalancerHealthcheckArgs( + port=80, + protocol="tcp", + ), region=region, ) diff --git a/gcp-py-gke/__main__.py b/gcp-py-gke/__main__.py index c4955218e..387df2220 100644 --- a/gcp-py-gke/__main__.py +++ b/gcp-py-gke/__main__.py @@ -1,16 +1,17 @@ from pulumi import Config, export, get_project, get_stack, Output, ResourceOptions from pulumi_gcp.config import project, zone -from pulumi_gcp.container import Cluster, get_engine_versions +from pulumi_gcp.container import Cluster, ClusterMasterAuthArgs, ClusterNodeConfigArgs from pulumi_kubernetes import Provider -from pulumi_kubernetes.apps.v1 import Deployment -from pulumi_kubernetes.core.v1 import Service +from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs +from pulumi_kubernetes.core.v1 import ContainerArgs, PodSpecArgs, PodTemplateSpecArgs, Service, ServicePortArgs, ServiceSpecArgs +from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs from pulumi_random import RandomPassword # Read in some configurable settings for our cluster: config = Config(None) # nodeCount is the number of cluster nodes to provision. Defaults to 3 if unspecified. -NODE_COUNT = config.get('node_count') or 3 +NODE_COUNT = config.get_int('node_count') or 3 # nodeMachineType is the machine type to use for cluster nodes. Defaults to n1-standard-1 if unspecified. 
# See https://cloud.google.com/compute/docs/machine-types for more details on available machine types. NODE_MACHINE_TYPE = config.get('node_machine_type') or 'n1-standard-1' @@ -26,16 +27,16 @@ initial_node_count=NODE_COUNT, node_version=MASTER_VERSION, min_master_version=MASTER_VERSION, - master_auth={ 'username': USERNAME, 'password': PASSWORD }, - node_config={ - 'machine_type': NODE_MACHINE_TYPE, - 'oauth_scopes': [ + master_auth=ClusterMasterAuthArgs(username=USERNAME, password=PASSWORD), + node_config=ClusterNodeConfigArgs( + machine_type=NODE_MACHINE_TYPE, + oauth_scopes=[ 'https://www.googleapis.com/auth/compute', 'https://www.googleapis.com/auth/devstorage.read_only', 'https://www.googleapis.com/auth/logging.write', 'https://www.googleapis.com/auth/monitoring' ], - }, + ), ) # Manufacture a GKE-style Kubeconfig. Note that this is slightly "different" because of the way GKE requires @@ -74,25 +75,25 @@ # Create a canary deployment to test that this cluster works. labels = { 'app': 'canary-{0}-{1}'.format(get_project(), get_stack()) } canary = Deployment('canary', - spec={ - 'selector': { 'matchLabels': labels }, - 'replicas': 1, - 'template': { - 'metadata': { 'labels': labels }, - 'spec': { 'containers': [{ 'name': 'nginx', 'image': 'nginx' }] }, - }, - }, __opts__=ResourceOptions(provider=k8s_provider) + spec=DeploymentSpecArgs( + selector=LabelSelectorArgs(match_labels=labels), + replicas=1, + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs(labels=labels), + spec=PodSpecArgs(containers=[ContainerArgs(name='nginx', image='nginx')]), + ), + ), __opts__=ResourceOptions(provider=k8s_provider) ) ingress = Service('ingress', - spec={ - 'type': 'LoadBalancer', - 'selector': labels, - 'ports': [{'port': 80}], - }, __opts__=ResourceOptions(provider=k8s_provider) + spec=ServiceSpecArgs( + type='LoadBalancer', + selector=labels, + ports=[ServicePortArgs(port=80)], + ), __opts__=ResourceOptions(provider=k8s_provider) ) # Finally, export the kubeconfig so 
that the client can easily access the cluster. export('kubeconfig', k8s_config) # Export the k8s ingress IP to access the canary deployment -export('ingress_ip', Output.all(ingress.status['load_balancer']['ingress'][0]['ip'])) +export('ingress_ip', ingress.status.apply(lambda status: status.load_balancer.ingress[0].ip)) diff --git a/gcp-py-instance-nginx/__main__.py b/gcp-py-instance-nginx/__main__.py index c9086bf73..8d780c667 100644 --- a/gcp-py-instance-nginx/__main__.py +++ b/gcp-py-instance-nginx/__main__.py @@ -10,14 +10,14 @@ "poc", network=network.self_link, allows=[ - { - "protocol": "tcp", - "ports": ["22"] - }, - { - "protocol": "tcp", - "ports": ["80"] - } + compute.FirewallAllowArgs( + protocol="tcp", + ports=["22"] + ), + compute.FirewallAllowArgs( + protocol="tcp", + ports=["80"] + ), ] ) @@ -33,18 +33,18 @@ instance = compute.Instance( "poc", machine_type="f1-micro", - boot_disk={ - "initializeParams": { - "image": "ubuntu-os-cloud/ubuntu-1804-bionic-v20200414" - } - }, + boot_disk=compute.InstanceBootDiskArgs( + initialize_params=compute.InstanceBootDiskInitializeParamsArgs( + image="ubuntu-os-cloud/ubuntu-1804-bionic-v20200414" + ), + ), network_interfaces=[ - { - "network": network.id, - "accessConfigs": [{ - "nat_ip": instance_addr.address - }] - } + compute.InstanceNetworkInterfaceArgs( + network=network.id, + access_configs=[compute.InstanceNetworkInterfaceAccessConfigArgs( + nat_ip=instance_addr.address + )] + ) ], metadata_startup_script=script, ) @@ -72,33 +72,33 @@ container_instance = compute.Instance( "poc-container-instance", machine_type="f1-micro", - boot_disk={ - "initializeParams": { - "image": "cos-cloud/cos-stable-81-12871-69-0" - } - }, + boot_disk=compute.InstanceBootDiskArgs( + initialize_params=compute.InstanceBootDiskInitializeParamsArgs( + image="cos-cloud/cos-stable-81-12871-69-0", + ) + ), metadata={ "gce-container-declaration": container_instance_metadata_script, }, network_interfaces=[ - { - "network": network.id, - 
"accessConfigs": [{ - "nat_ip": container_instance_addr.address - }] - } + compute.InstanceNetworkInterfaceArgs( + network=network.id, + access_configs=[compute.InstanceNetworkInterfaceAccessConfigArgs( + nat_ip=container_instance_addr.address + )] + ) ], - service_account={ - "email": "default", - "scopes": [ + service_account=compute.InstanceServiceAccountArgs( + email="default", + scopes=[ "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring.write", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/trace.append", ], - }, + ), ) pulumi.export("container_instance_name", container_instance.name) diff --git a/gcp-py-network-component/__main__.py b/gcp-py-network-component/__main__.py index 59e861012..0e813c44d 100644 --- a/gcp-py-network-component/__main__.py +++ b/gcp-py-network-component/__main__.py @@ -1,4 +1,4 @@ -from pulumi import export, ResourceOptions +from pulumi import export from pulumi_gcp import compute from config import project, owner, subnet_cidr_blocks, nginx_install_script import instance @@ -26,4 +26,4 @@ export('network', network.network.name) export('nginx_public_ip', - nginx_instance.instance.network_interfaces[0]["accessConfigs"][0]["natIp"]) + nginx_instance.instance.network_interfaces.apply(lambda ni: ni[0].access_configs[0].nat_ip)) diff --git a/gcp-py-network-component/instance.py b/gcp-py-network-component/instance.py index 36fec5d17..f019b6533 100644 --- a/gcp-py-network-component/instance.py +++ b/gcp-py-network-component/instance.py @@ -31,10 +31,10 @@ def 
__init__(self, firewall = compute.Firewall(name, network=args.subnet.network, - allows=[{ - "protocol": "tcp", - "ports": args.ports, - }], + allows=[compute.FirewallAllowArgs( + protocol="tcp", + ports=args.ports, + )], target_tags=[args.service_name], opts=ResourceOptions(parent=self) ) @@ -45,17 +45,17 @@ def __init__(self, self.instance = compute.Instance(name, machine_type=args.machine_type, - boot_disk={ - "initializeParams": { - "image": "ubuntu-os-cloud/ubuntu-1804-lts" - } - }, - network_interfaces=[{ - "subnetwork": args.subnet.self_link, - "accessConfigs": [{ - "nat_ip": addr.address - }] - }], + boot_disk=compute.InstanceBootDiskArgs( + initialize_params=compute.InstanceBootDiskInitializeParamsArgs( + image="ubuntu-os-cloud/ubuntu-1804-lts" + ) + ), + network_interfaces=[compute.InstanceNetworkInterfaceArgs( + subnetwork=args.subnet.self_link, + access_configs=[compute.InstanceNetworkInterfaceAccessConfigArgs( + nat_ip=addr.address + )] + )], tags=[args.service_name], metadata=args.metadata, metadata_startup_script=args.metadata_startup_script, diff --git a/gcp-py-serverless-raw/__main__.py b/gcp-py-serverless-raw/__main__.py index 1e4cff898..262df6466 100644 --- a/gcp-py-serverless-raw/__main__.py +++ b/gcp-py-serverless-raw/__main__.py @@ -16,7 +16,7 @@ runtime="python37", source_archive_object=py_bucket_object.name, entry_point="handler", - trigger_http="true", + trigger_http=True, available_memory_mb=128, ) @@ -44,7 +44,7 @@ runtime="go111", source_archive_object=go_bucket_object.name, entry_point="Handler", - trigger_http="true", + trigger_http=True, available_memory_mb=128, ) diff --git a/gcp-py-webserver/__main__.py b/gcp-py-webserver/__main__.py index 750e729db..b9114ac23 100644 --- a/gcp-py-webserver/__main__.py +++ b/gcp-py-webserver/__main__.py @@ -3,17 +3,17 @@ from pulumi_gcp import compute compute_network = compute.Network( - "network", + "network", auto_create_subnetworks=True, ) compute_firewall = compute.Firewall( - "firewall", + 
"firewall", network=compute_network.self_link, - allows=[{ - "protocol": "tcp", - "ports": [ "22", "80" ], - }] + allows=[compute.FirewallAllowArgs( + protocol="tcp", + ports=["22", "80"], + )] ) # A simple bash script that will run when the webserver is initalized @@ -23,25 +23,25 @@ instance_addr = compute.address.Address("address") compute_instance = compute.Instance( - "instance", + "instance", machine_type="f1-micro", metadata_startup_script=startup_script, - boot_disk={ - "initializeParams": { - "image": "debian-cloud/debian-9-stretch-v20181210" - } - }, - network_interfaces=[{ - "network": compute_network.id, - "accessConfigs": [{ - "natIp": instance_addr.address - }], - }], - service_account={ - "scopes": ["https://www.googleapis.com/auth/cloud-platform"], - }, + boot_disk=compute.InstanceBootDiskArgs( + initialize_params=compute.InstanceBootDiskInitializeParamsArgs( + image="debian-cloud/debian-9-stretch-v20181210" + ) + ), + network_interfaces=[compute.InstanceNetworkInterfaceArgs( + network=compute_network.id, + access_configs=[compute.InstanceNetworkInterfaceAccessConfigArgs( + nat_ip=instance_addr.address + )], + )], + service_account=compute.InstanceServiceAccountArgs( + scopes=["https://www.googleapis.com/auth/cloud-platform"], + ), opts=ResourceOptions(depends_on=[compute_firewall]), ) pulumi.export("instanceName", compute_instance.name) -pulumi.export("instanceIP", instance_addr.address) \ No newline at end of file +pulumi.export("instanceIP", instance_addr.address) diff --git a/kubernetes-py-guestbook/components/__main__.py b/kubernetes-py-guestbook/components/__main__.py index 8786a2808..b6798b645 100644 --- a/kubernetes-py-guestbook/components/__main__.py +++ b/kubernetes-py-guestbook/components/__main__.py @@ -34,6 +34,6 @@ replicas=3, ports=[80], allocate_ip_address=True, - is_minikube=config.get_bool("isMinikube")) + is_minikube=isMinikube) pulumi.export("frontend_ip", frontend.ip_address) diff --git 
a/kubernetes-py-guestbook/components/service_deployment.py b/kubernetes-py-guestbook/components/service_deployment.py index a582d88a9..ccf3df0b8 100644 --- a/kubernetes-py-guestbook/components/service_deployment.py +++ b/kubernetes-py-guestbook/components/service_deployment.py @@ -12,12 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List +from typing import Sequence import pulumi from pulumi import ResourceOptions, ComponentResource, Output -from pulumi_kubernetes.apps.v1 import Deployment -from pulumi_kubernetes.core.v1 import Service, Namespace +from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs +from pulumi_kubernetes.core.v1 import ( + ContainerArgs, + ContainerPortArgs, + PodSpecArgs, + PodTemplateSpecArgs, + ResourceRequirementsArgs, + Service, + ServicePortArgs, + ServiceSpecArgs, +) +from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs class ServiceDeployment(ComponentResource): @@ -26,45 +36,50 @@ class ServiceDeployment(ComponentResource): ip_address: Output[str] def __init__(self, name: str, image: str, - resources: dict = None, replicas: int = None, - ports: List[int] = None, allocate_ip_address: bool = None, + resources: ResourceRequirementsArgs = None, replicas: int = None, + ports: Sequence[int] = None, allocate_ip_address: bool = None, is_minikube: bool = None, opts: ResourceOptions = None): super().__init__('k8sx:component:ServiceDeployment', name, {}, opts) labels = {"app": name} - container = { - "name": name, - "image": image, - "resources": resources or {"requests": {"cpu": "100m", "memory": "100Mi"}}, - "ports": [{"container_port": p} for p in ports] if ports else None, - } + container = ContainerArgs( + name=name, + image=image, + resources=resources or ResourceRequirementsArgs( + requests={ + "cpu": "100m", + "memory": "100Mi" + }, + ), + ports=[ContainerPortArgs(container_port=p) for p in ports] if ports else None, + ) 
self.deployment = Deployment( name, - spec={ - "selector": {"match_labels": labels}, - "replicas": 1, - "template": { - "metadata": {"labels": labels}, - "spec": {"containers": [container]}, - }, - }, + spec=DeploymentSpecArgs( + selector=LabelSelectorArgs(match_labels=labels), + replicas=replicas if replicas is not None else 1, + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs(labels=labels), + spec=PodSpecArgs(containers=[container]), + ), + ), opts=pulumi.ResourceOptions(parent=self)) self.service = Service( name, - metadata={ - "name": name, - "labels": self.deployment.metadata['labels'], - }, - spec={ - "ports": [{"port": p, "targetPort": p} for p in ports] if ports else None, - "selector": self.deployment.spec['template']['metadata']['labels'], - "type": ("ClusterIP" if is_minikube else "LoadBalancer") if allocate_ip_address else None, - }, + metadata=ObjectMetaArgs( + name=name, + labels=self.deployment.metadata.apply(lambda m: m.labels), + ), + spec=ServiceSpecArgs( + ports=[ServicePortArgs(port=p, target_port=p) for p in ports] if ports else None, + selector=self.deployment.spec.apply(lambda s: s.template.metadata.labels), + type=("ClusterIP" if is_minikube else "LoadBalancer") if allocate_ip_address else None, + ), opts=pulumi.ResourceOptions(parent=self)) if allocate_ip_address: if is_minikube: - self.ip_address = self.service.spec['clusterIP'] + self.ip_address = self.service.spec.apply(lambda s: s.cluster_ip) else: - ingress=self.service.status['load_balancer']['ingress'][0] - self.ip_address = ingress.apply(lambda i: ingress["ip"] if "ip" in i else ingress['hostname']) - self.register_outputs({}) \ No newline at end of file + ingress=self.service.status.apply(lambda s: s.load_balancer.ingress[0]) + self.ip_address = ingress.apply(lambda i: i.ip or i.hostname or "") + self.register_outputs({}) diff --git a/kubernetes-py-guestbook/simple/__main__.py b/kubernetes-py-guestbook/simple/__main__.py index 38c742bb0..24cffab38 100644 --- 
a/kubernetes-py-guestbook/simple/__main__.py +++ b/kubernetes-py-guestbook/simple/__main__.py @@ -13,8 +13,19 @@ # limitations under the License. import pulumi -from pulumi_kubernetes.apps.v1 import Deployment -from pulumi_kubernetes.core.v1 import Service, Namespace +from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs +from pulumi_kubernetes.core.v1 import ( + ContainerArgs, + ContainerPortArgs, + EnvVarArgs, + PodSpecArgs, + PodTemplateSpecArgs, + ResourceRequirementsArgs, + Service, + ServicePortArgs, + ServiceSpecArgs, +) +from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs # Minikube does not implement services of type `LoadBalancer`; require the user to specify if we're # running on minikube, and if so, create only services of type ClusterIP. @@ -27,46 +38,46 @@ redis_leader_deployment = Deployment( "redis-leader", - spec={ - "selector": { - "match_labels": redis_leader_labels, - }, - "replicas": 1, - "template": { - "metadata": { - "labels": redis_leader_labels, - }, - "spec": { - "containers": [{ - "name": "redis-leader", - "image": "redis", - "resources": { - "requests": { + spec=DeploymentSpecArgs( + selector=LabelSelectorArgs( + match_labels=redis_leader_labels, + ), + replicas=1, + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs( + labels=redis_leader_labels, + ), + spec=PodSpecArgs( + containers=[ContainerArgs( + name="redis-leader", + image="redis", + resources=ResourceRequirementsArgs( + requests={ "cpu": "100m", "memory": "100Mi", }, - }, - "ports": [{ - "container_port": 6379, - }], - }], - }, - }, - }) + ), + ports=[ContainerPortArgs( + container_port=6379, + )], + )], + ), + ), + )) redis_leader_service = Service( "redis-leader", - metadata={ - "name": "redis-leader", - "labels": redis_leader_labels - }, - spec={ - "ports": [{ - "port": 6379, - "target_port": 6379, - }], - "selector": redis_leader_labels - }) + metadata=ObjectMetaArgs( + name="redis-leader", + labels=redis_leader_labels + ), + 
spec=ServiceSpecArgs( + ports=[ServicePortArgs( + port=6379, + target_port=6379, + )], + selector=redis_leader_labels + )) redis_replica_labels = { "app": "redis-replica", @@ -74,54 +85,54 @@ redis_replica_deployment = Deployment( "redis-replica", - spec={ - "selector": { - "match_labels": redis_replica_labels - }, - "replicas": 1, - "template": { - "metadata": { - "labels": redis_replica_labels, - }, - "spec": { - "containers": [{ - "name": "redis-replica", - "image": "pulumi/guestbook-redis-replica", - "resources": { - "requests": { + spec=DeploymentSpecArgs( + selector=LabelSelectorArgs( + match_labels=redis_replica_labels + ), + replicas=1, + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs( + labels=redis_replica_labels, + ), + spec=PodSpecArgs( + containers=[ContainerArgs( + name="redis-replica", + image="pulumi/guestbook-redis-replica", + resources=ResourceRequirementsArgs( + requests={ "cpu": "100m", "memory": "100Mi", }, - }, - "env": [{ - "name": "GET_HOSTS_FROM", - "value": "dns", + ), + env=[EnvVarArgs( + name="GET_HOSTS_FROM", + value="dns", # If your cluster config does not include a dns service, then to instead access an environment # variable to find the leader's host, comment out the 'value: dns' line above, and # uncomment the line below: # value: "env" - }], - "ports": [{ - "container_port": 6379, - }], - }], - }, - }, - }) + )], + ports=[ContainerPortArgs( + container_port=6379, + )], + )], + ), + ), + )) redis_replica_service = Service( "redis-replica", - metadata={ - "name": "redis-replica", - "labels": redis_replica_labels - }, - spec={ - "ports": [{ - "port": 6379, - "target_port": 6379, - }], - "selector": redis_replica_labels - }) + metadata=ObjectMetaArgs( + name="redis-replica", + labels=redis_replica_labels + ), + spec=ServiceSpecArgs( + ports=[ServicePortArgs( + port=6379, + target_port=6379, + )], + selector=redis_replica_labels + )) # Frontend frontend_labels = { @@ -130,59 +141,59 @@ frontend_deployment = Deployment( 
"frontend", - spec={ - "selector": { - "match_labels": frontend_labels, - }, - "replicas": 3, - "template": { - "metadata": { - "labels": frontend_labels, - }, - "spec": { - "containers": [{ - "name": "php-redis", - "image": "pulumi/guestbook-php-redis", - "resources": { - "requests": { + spec=DeploymentSpecArgs( + selector=LabelSelectorArgs( + match_labels=frontend_labels, + ), + replicas=3, + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs( + labels=frontend_labels, + ), + spec=PodSpecArgs( + containers=[ContainerArgs( + name="php-redis", + image="pulumi/guestbook-php-redis", + resources=ResourceRequirementsArgs( + requests={ "cpu": "100m", "memory": "100Mi", }, - }, - "env": [{ - "name": "GET_HOSTS_FROM", - "value": "dns", + ), + env=[EnvVarArgs( + name="GET_HOSTS_FROM", + value="dns", # If your cluster config does not include a dns service, then to instead access an environment # variable to find the leader's host, comment out the 'value: dns' line above, and # uncomment the line below: # "value": "env" - }], - "ports": [{ - "container_port": 80, - }], - }], - }, - }, - }) + )], + ports=[ContainerPortArgs( + container_port=80, + )], + )], + ), + ), + )) frontend_service = Service( "frontend", - metadata={ - "name": "frontend", - "labels": frontend_labels, - }, - spec={ - "type": "ClusterIP" if isMinikube else "LoadBalancer", - "ports": [{ - "port": 80 - }], - "selector": frontend_labels, - }) + metadata=ObjectMetaArgs( + name="frontend", + labels=frontend_labels, + ), + spec=ServiceSpecArgs( + type="ClusterIP" if isMinikube else "LoadBalancer", + ports=[ServicePortArgs( + port=80 + )], + selector=frontend_labels, + )) frontend_ip = "" if isMinikube: - frontend_ip = frontend_service.spec.apply(lambda spec: spec.get("cluster_ip", "")) + frontend_ip = frontend_service.spec.apply(lambda spec: spec.cluster_ip or "") else: - ingress = frontend_service.status.apply(lambda status: status["load_balancer"]["ingress"][0]) - frontend_ip = ingress.apply(lambda 
ingress: ingress.get("ip", ingress.get("hostname", ""))) + ingress = frontend_service.status.apply(lambda status: status.load_balancer.ingress[0]) + frontend_ip = ingress.apply(lambda ingress: ingress.ip or ingress.hostname or "") pulumi.export("frontend_ip", frontend_ip) diff --git a/kubernetes-py-jenkins/__main__.py b/kubernetes-py-jenkins/__main__.py index 5f415c62e..4c53af366 100644 --- a/kubernetes-py-jenkins/__main__.py +++ b/kubernetes-py-jenkins/__main__.py @@ -11,11 +11,11 @@ instance = jenkins.Instance( name=pulumi.get_stack(), - credentials= { + credentials={ "username": config.require("username"), "password": config.require("password"), }, - resources= { + resources={ "memory": "512Mi", "cpu": "100m", }, diff --git a/kubernetes-py-jenkins/jenkins.py b/kubernetes-py-jenkins/jenkins.py index 4dbf7c5ae..245b1631e 100644 --- a/kubernetes-py-jenkins/jenkins.py +++ b/kubernetes-py-jenkins/jenkins.py @@ -1,15 +1,40 @@ # Copyright 2016-2020, Pulumi Corporation. All rights reserved. +from typing import NamedTuple + import base64 import pulumi from pulumi import ResourceOptions -from pulumi_kubernetes.apps.v1 import Deployment -from pulumi_kubernetes.core.v1 import PersistentVolumeClaim -from pulumi_kubernetes.core.v1 import Secret -from pulumi_kubernetes.core.v1 import Service +from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs +from pulumi_kubernetes.core.v1 import ( + ContainerArgs, + ContainerPortArgs, + EnvVarArgs, + EnvVarSourceArgs, + HTTPGetActionArgs, + PersistentVolumeClaim, + PersistentVolumeClaimSpecArgs, + PersistentVolumeClaimVolumeSourceArgs, + PodSpecArgs, + PodTemplateSpecArgs, + ProbeArgs, + ResourceRequirementsArgs, + Secret, + SecretKeySelectorArgs, + Service, + ServicePortArgs, + ServiceSpecArgs, + VolumeArgs, + VolumeMountArgs, +) +from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs + +class DeploymentArgs(NamedTuple): + metadata: ObjectMetaArgs + spec: DeploymentSpecArgs -def createDeploymentArgs(name, 
credentials, resources, image=None): - image = image if image is not None else { +def create_deployment_args(name, credentials, resources, image=None) -> DeploymentArgs: + image = image if image is not None else { "registry": "docker.io", "repository": "bitnami/jenkins", "tag": "2.121.2", @@ -18,111 +43,112 @@ def createDeploymentArgs(name, credentials, resources, image=None): # This object is a projection of the Kubernetes object model into the Pulumi object model. # Its structure is derived from the Deployment object in the Kubernetes API. - return { - "metadata": { - "name": name, - }, - "spec": { - "replicas": 1, - "selector": { - "matchLabels": { + return DeploymentArgs( + ObjectMetaArgs(name=name), + DeploymentSpecArgs( + replicas=1, + selector=LabelSelectorArgs( + match_labels={ "app": name, }, - }, - "template": { - "metadata": { - "labels": { + ), + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs( + labels={ "app": name, }, - }, - "spec": { - "volumes": [ - { - "name": "jenkins-data", - "persistentVolumeClaim": { - "claimName": name, - }, - }, + ), + spec=PodSpecArgs( + volumes=[ + VolumeArgs( + name="jenkins-data", + persistent_volume_claim=PersistentVolumeClaimVolumeSourceArgs( + claim_name=name, + ), + ), ], - "containers": [ - { - "name": name, - "image": image["registry"] + "/" + image["repository"] + ":" + image["tag"], - "imagePullPolicy": image["pullPolicy"], - "env": [ - { - "name": "JENKINS_USERNAME", - "value": credentials["username"], - }, - { - "name": "JENKINS_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": name, - "key": "jenkins-password", - }, - }, - }, + containers=[ + ContainerArgs( + name=name, + image=image["registry"] + "/" + image["repository"] + ":" + image["tag"], + image_pull_policy=image["pullPolicy"], + env=[ + EnvVarArgs( + name="JENKINS_USERNAME", + value=credentials["username"], + ), + EnvVarArgs( + name="JENKINS_PASSWORD", + value_from=EnvVarSourceArgs( + secret_key_ref=SecretKeySelectorArgs( + name=name, + 
key="jenkins-password", + ), + ), + ), ], - "ports": [ - { - "name": "http", - "containerPort": 8080, - }, - { - "name": "https", - "containerPort": 8443, - }, + ports=[ + ContainerPortArgs( + name="http", + container_port=8080, + ), + ContainerPortArgs( + name="https", + container_port=8443, + ), ], - "livenessProbe": { - "httpGet": { - "path": "/login", - "port": "http", - }, - "initialDelaySeconds": 180, - "timeoutSeconds": 5, - "failureThreshold": 6, - }, - "readinessProbe": { - "httpGet": { - "path": "/login", - "port": "http", - }, - "initialDelaySeconds": 90, - "timeoutSeconds": 5, - "periodSeconds": 6, - }, - "volumeMounts": [ - { - "name": "jenkins-data", - "mountPath": "/bitnami/jenkins", - }, + liveness_probe=ProbeArgs( + http_get=HTTPGetActionArgs( + path="/login", + port="http", + ), + initial_delay_seconds=180, + timeout_seconds=5, + failure_threshold=6, + ), + readiness_probe=ProbeArgs( + http_get=HTTPGetActionArgs( + path="/login", + port="http", + ), + initial_delay_seconds=90, + timeout_seconds=5, + period_seconds=6, + ), + volume_mounts=[ + VolumeMountArgs( + name="jenkins-data", + mount_path="/bitnami/jenkins", + ), ], - "resources": { - "requests": { + resources=ResourceRequirementsArgs( + requests={ "memory": resources["memory"], "cpu": resources["cpu"], }, - }, - }, # container - ], # containers - }, # spec - }, # template - }, # spec - } # deployment + ), + ), + ], + ), + ), + ) + ) + -# ComponentResource for a Jenkins instance running in a Kubernetes cluster. +class Instance(pulumi.ComponentResource): + """ + ComponentResource for a Jenkins instance running in a Kubernetes cluster. 
+ """ -class Instance (pulumi.ComponentResource): def __init__(self, name, credentials, resources, image=None, opts=None): - super(Instance, self).__init__("jenkins:jenkins:Instance", name, {"credentials": credentials, "resources": resources, "image": image}, opts) + super().__init__("jenkins:jenkins:Instance", name, {"credentials": credentials, "resources": resources, "image": image}, opts) # The Secret will contain the root password for this instance. secret = Secret( name+"-secret", - metadata={ - "name": name, - }, + metadata=ObjectMetaArgs( + name=name, + ), type="Opaque", data={ "jenkins-password": str(base64.b64encode(bytes(credentials["password"],"utf-8"),None),"utf-8"), @@ -132,57 +158,57 @@ def __init__(self, name, credentials, resources, image=None, opts=None): # The PVC provides persistent storage for Jenkins states. pvc = PersistentVolumeClaim( - name+"-pvc", - metadata={ - "name": name, - }, - spec={ - "accessModes": ["ReadWriteOnce"], - "resources": { - "requests": { + name+"-pvc", + metadata=ObjectMetaArgs( + name=name, + ), + spec=PersistentVolumeClaimSpecArgs( + access_modes=["ReadWriteOnce"], + resources=ResourceRequirementsArgs( + requests={ "storage": "8Gi", }, - }, - }, + ), + ), opts=ResourceOptions(parent=self), ) # The Deployment describes the desired state for our Jenkins setup. - deploymentArgs = createDeploymentArgs(name, credentials, resources, image) + deploymentArgs = create_deployment_args(name, credentials, resources, image) deployment = Deployment( name+"-deploy", - metadata=deploymentArgs["metadata"], - spec=deploymentArgs["spec"], + metadata=deploymentArgs.metadata, + spec=deploymentArgs.spec, opts=ResourceOptions(parent=self), ) # The Service exposes Jenkins to the external internet by providing load-balanced ingress for HTTP and HTTPS. 
service = Service( name+"-service", - metadata={ - "name": name, - }, - spec={ - "type": "LoadBalancer", - "ports": [ - { - "name": "http", - "port": 80, - "targetPort": "http", - }, - { - "name": "https", - "port": 443, - "targetPort": "https", - }, + metadata=ObjectMetaArgs( + name=name, + ), + spec=ServiceSpecArgs( + type="LoadBalancer", + ports=[ + ServicePortArgs( + name="http", + port=80, + target_port="http", + ), + ServicePortArgs( + name="https", + port=443, + target_port="https", + ), ], - "selector": { + selector={ "app": name, }, - }, + ), opts=ResourceOptions(parent=self) ) - ingress = service.status["load_balancer"]["ingress"][0] - self.external_ip = ingress.apply(lambda x: x.get("ip", x["hostname"])) + ingress = service.status.apply(lambda s: s.load_balancer.ingress[0]) + self.external_ip = ingress.apply(lambda x: x.ip or x.hostname) self.register_outputs({"external_ip": self.external_ip}) diff --git a/kubernetes-py-nginx/__main__.py b/kubernetes-py-nginx/__main__.py index 343c31311..46d0f18f7 100644 --- a/kubernetes-py-nginx/__main__.py +++ b/kubernetes-py-nginx/__main__.py @@ -1,25 +1,27 @@ # Copyright 2016-2020, Pulumi Corporation. All rights reserved. 
import pulumi -from pulumi_kubernetes.apps.v1 import Deployment +from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs +from pulumi_kubernetes.core.v1 import ContainerArgs, ContainerPortArgs, PodSpecArgs, PodTemplateSpecArgs +from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs config = pulumi.Config() -nginxLabels = { "app": "nginx" } +nginxLabels = {"app": "nginx"} nginxDeployment = Deployment( - "nginx-deployment", - spec={ - "selector": { "matchLabels": nginxLabels }, - "replicas": 2 if config.get_int("replicas") is None else config.get_int("replicas"), - "template": { - "metadata": { "labels": nginxLabels }, - "spec": { - "containers": [{ - "name": "nginx", - "image": "nginx:1.7.9", - "ports": [{ "containerPort": 80 }], - }], - }, - }, - }) + "nginx-deployment", + spec=DeploymentSpecArgs( + selector=LabelSelectorArgs(match_labels=nginxLabels), + replicas=2 if config.get_int("replicas") is None else config.get_int("replicas"), + template=PodTemplateSpecArgs( + metadata=ObjectMetaArgs(labels=nginxLabels), + spec=PodSpecArgs( + containers=[ContainerArgs( + name="nginx", + image="nginx:1.7.9", + ports=[ContainerPortArgs(container_port=80)], + )], + ), + ), + )) -pulumi.export("nginx", nginxDeployment.metadata["name"]) +pulumi.export("nginx", nginxDeployment.metadata.apply(lambda m: m.name))