From be85a772e224741fcce28e9c0dd14b8300b0a960 Mon Sep 17 00:00:00 2001 From: Sergei Trofimov Date: Tue, 17 Sep 2024 11:31:52 +0100 Subject: [PATCH] WIP --- deployments/aws/Makefile | 2 + deployments/aws/bin/veraison | 1120 +++++++++++++++++ deployments/aws/deployment.cfg | 41 + deployments/aws/deployment.sh | 222 ++++ deployments/aws/env/env.bash | 13 + deployments/aws/misc/requirements.txt | 8 + .../aws/templates/image-combined.pkr.hcl | 88 ++ deployments/aws/templates/stack-combined.yaml | 102 ++ end-to-end/end-to-end-aws | 153 +++ 9 files changed, 1749 insertions(+) create mode 100644 deployments/aws/Makefile create mode 100755 deployments/aws/bin/veraison create mode 100644 deployments/aws/deployment.cfg create mode 100755 deployments/aws/deployment.sh create mode 100644 deployments/aws/env/env.bash create mode 100644 deployments/aws/misc/requirements.txt create mode 100644 deployments/aws/templates/image-combined.pkr.hcl create mode 100644 deployments/aws/templates/stack-combined.yaml create mode 100755 end-to-end/end-to-end-aws diff --git a/deployments/aws/Makefile b/deployments/aws/Makefile new file mode 100644 index 00000000..bc4fb310 --- /dev/null +++ b/deployments/aws/Makefile @@ -0,0 +1,2 @@ +THIS_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +SCRIPT := $(THIS_DIR)/deployment.sh diff --git a/deployments/aws/bin/veraison b/deployments/aws/bin/veraison new file mode 100755 index 00000000..5045c088 --- /dev/null +++ b/deployments/aws/bin/veraison @@ -0,0 +1,1120 @@ +#!/usr/bin/env python +# pyright: reportOptionalMemberAccess=false +# pyright: reportOptionalSubscript=false +import argparse +import asyncio +import getpass +import inspect +import io +import logging +import os +import pprint +import re +import shutil +import socket +import stat +import sys +import tarfile +import time +from asyncio.subprocess import Process, PIPE +from copy import copy +from urllib.parse import urlparse + +import ar +import boto3 +import fabric +import requests +import xdg.BaseDirectory +import yaml +from botocore.exceptions import ClientError +from sqlitedict import SqliteDict + +COLOR_DARK_GREY = '\x1b[38;5;245m' +COLOR_GREY = '\x1b[38;20m' +COLOR_GREEN = '\x1b[38;5;2m' +COLOR_YELLOW = '\x1b[33;20m' +COLOR_RED = '\x1b[31;20m' +COLOR_BOLD_RED = '\x1b[31;1m' +COLOR_RESET = '\x1b[0m' + +class Aws: + + @property + def ec2(self): + if self._ec2 is None: + self._ec2 = self.session.client('ec2') + return self._ec2 + + @property + def cf(self): + if self._cf is None: + self._cf = self.session.client('cloudformation') + return self._cf + + def __init__(self, **kwargs): + self.session = boto3.Session(**kwargs) + self._ec2 = None + self._cf = None + + def close(self): + if self._ec2 is not None: + self._ec2.close() + self._ec2 = None + if self._cf is not None: + self._cf.close() + self._cf = None + + def __del__(self): + self.close() + + +def get_public_ip_address(): + resp = requests.get('http://ifconfig.me') + if resp.status_code != 200: + raise RuntimeError( + f'could not access http://ifconfig.me: {resp.reason} ({resp.status_code})') + return resp.text + + +def authorize_ports_for_address(aws, group_id, addr, ports, tag, deployment_tags): + permissions = [] + for port in ports: + permissions.append({ + 'FromPort': port, + 'ToPort': port, + 'IpProtocol': 'tcp', + 'IpRanges': [{'CidrIp': f'{addr}/32'}], + }) + + tags = copy(deployment_tags) + tags.append({'Key': 'dynamic-address', 'Value': tag}) + + aws.ec2.authorize_security_group_ingress( + GroupId=group_id, + IpPermissions=permissions, + 
TagSpecifications=[ + { + 'ResourceType': 'security-group-rule', + 'Tags': tags, + } + ], + ) + + +def revoke_security_group_rules_by_tag(aws, group_id, tag): + resp = aws.ec2.describe_security_group_rules( + Filters=[ + { + 'Name': 'group-id', + 'Values': [group_id], + }, + { + 'Name': 'tag:dynamic-address', + 'Values': [tag], + } + ], + ) + + rule_ids = [sgr['SecurityGroupRuleId'] for sgr in resp['SecurityGroupRules']] + if not rule_ids: + return + + aws.ec2.revoke_security_group_ingress( + GroupId=group_id, + SecurityGroupRuleIds=rule_ids, + ) + + +def update_dynamic_address_rules(aws, deployment_name, tag, ports): + resp = aws.ec2.describe_security_groups( + Filters=[{ + 'Name': 'tag:veraison-deployment', + 'Values': [deployment_name], + }] + ) + + group_ids = [sgr['GroupId'] for sgr in resp['SecurityGroups']] + if not group_ids: + return + + my_addr = get_public_ip_address() + + for group_id in group_ids: + revoke_security_group_rules_by_tag(aws, group_id, tag) + authorize_ports_for_address( + aws, group_id, my_addr, ports, tag, + deployment_tags=[{'Key': 'veraison-deployment', 'Value': deployment_name}]) + + +def get_deployment_info(aws, deployment_name): + resp = aws.cf.describe_stack_resources(StackName=deployment_name) + stack_resources = {sr['LogicalResourceId']: sr for sr in resp['StackResources']} + + instance_id = stack_resources['VeraisonInstance']['PhysicalResourceId'] + resp = aws.ec2.describe_instances(InstanceIds=[instance_id]) + instance = resp['Reservations'][0]['Instances'][0] + pub_iface = instance['NetworkInterfaces'][0]['Association'] + + return { + 'instance': { + 'id': instance_id, + 'dns_name': pub_iface["PublicDnsName"], + 'ip_address': pub_iface["PublicIp"], + }, + } + + +def get_ami_id(aws, name): + resp = aws.ec2.describe_images(Owners=['self']) + for image in resp['Images']: + if image['Name'] == name: + return image['ImageId'] + + +def run_in_shell(cmd, should_log): + logger = logging.getLogger('shell') + if should_log: + logger.setLevel(logging.DEBUG) + + loop = asyncio.new_event_loop() + try: + return loop.run_until_complete(_run_in_shell_teed(cmd, logger)) + finally: + loop.close() + + +async def _run_in_shell_teed(cmd, logger): + process: Process = await asyncio.create_subprocess_shell( + cmd, stdout=PIPE, stderr=PIPE, cwd=os.getcwd()) + + + stdout_buf, stderr_buf = [], [] + tasks = { + asyncio.Task(process.stdout.readline()): (process.stdout, stdout_buf), + asyncio.Task(process.stderr.readline()): (process.stderr, stderr_buf), + } + + while tasks: + done, _ = await asyncio.wait( + tasks, return_when=asyncio.FIRST_COMPLETED) # pyright: ignore[reportCallIssue] + for future in done: + stream, buf = tasks.pop(future) + line = future.result() + if line: + line = line.decode() + buf.append(line) + logger.debug(line.rstrip('\n')) + tasks[asyncio.Task(stream.readline())] = stream, buf # pyright: ignore[reportOptionalMemberAccess] + + rc = await process.wait() + return rc, ''.join(stdout_buf), ''.join(stderr_buf) + + +def command_get_vpc_id(cmd, args): + if args.vpc_id: + cmd.logger.debug(f'writing {args.vpc_id} to cache') + cmd.cache['vpc_id'] = args.vpc_id + return args.vpc_id + + vpc_id = cmd.cache.get('vpc_id') + if vpc_id: + cmd.logger.debug(f'using VPC ID from cache: {vpc_id}') + return vpc_id + + cmd.logger.debug('no VPC ID specified; trying to identify from account...') + resp = cmd.aws.ec2.describe_vpcs( + Filters=[{ + 'Name': 'state', + 'Values': ['available'], + }] + ) + if len(resp['Vpcs']) == 1: + vpc_id = resp['Vpcs'][0]['VpcId'] + 
cmd.cache['vpc_id'] = vpc_id + return vpc_id + elif len(resp['Vpcs']) > 1: + vpc_ids = ', '.join(vpc['VpcId'] for vpc in resp['Vpcs']) + cmd.fail(f'multiple VPCs found: {vpc_ids}; use --vpc-id to specify ' + 'which one should be used') + else: + cmd.fail('no VPCs found in the account') + + +def command_get_subnet_id(cmd, args): + if args.subnet_id: + cmd.logger.debug(f'writing {args.subnet_id} to cache') + cmd.cache['subnet_id'] = args.subnet_id + return args.subnet_id + + subnet_id = cmd.cache.get('subnet_id') + if subnet_id: + cmd.logger.debug(f'using subnet ID from cache: {subnet_id}') + return subnet_id + + cmd.logger.debug('no subnet ID specified; trying to identify from account...') + resp = cmd.aws.ec2.describe_subnets( + Filters=[{ + 'Name': 'state', + 'Values': ['available'], + }] + ) + if len(resp['Subnets']) == 1: + subnet_id = resp['Subnets'][0]['SubnetId'] + cmd.cache['subnet_id'] = subnet_id + return subnet_id + elif len(resp['Subnets']) > 1: + subnet_ids = ', '.join(subnet['SubnetId'] for subnet in resp['Subnets']) + cmd.fail(f'multiple subnets found: {subnet_ids}; use --subnet-id to specify ' + 'which one should be used') + else: + cmd.fail('no subnets found in the account') + + +def command_get_region(cmd, args, subnet_id=None): + region = cmd.cache.get('region') + if region: + return region + + if subnet_id is None: + subnet_id = command_get_subnet_id(cmd, args) + + resp = cmd.aws.ec2.describe_subnets(SubnetIds=[subnet_id]) + zone_id = resp['Subnets'][0]['AvailabilityZoneId'] + + resp = cmd.aws.ec2.describe_availability_zones(ZoneIds=[zone_id]) + region = resp['AvailabilityZones'][0]['RegionName'] + + cmd.cache['region'] = region + return region + + +def command_connect(cmd, instance_name, user='ubuntu', should_log=True): + instance = cmd.cache.get('instances', {}).get(instance_name) + if instance is None: + cmd.fail(f'could not find instance {instance_name} in cache') + + key_path = cmd.cache.get('key', {}).get('path') + if not key_path: + cmd.fail('couild not find key in cache') + + return fabric.Connection( + instance['dns_name'], + user=user, + connect_kwargs={ + 'key_filename': key_path, + }, + ) + + +def command_update_ca_cert_form_deb(cmd): + deb_path = cmd.cache.get('deb') + if not deb_path: + cmd.fail('deb not found') + + with open(deb_path, 'rb') as fh: + deb = ar.Archive(fh) + data_buf = io.BytesIO(deb.open('data.tar.xz', 'rb').read()) + tf = tarfile.open(fileobj=data_buf, mode='r:xz') + cert_fh = tf.extractfile('./opt/veraison/certs/rootCA.crt') + + dest_path = os.path.join(cmd.cache.dir, 'ca-cert.crt') + with open(dest_path, 'wb') as wfh: + wfh.write(cert_fh.read()) + + cmd.cache['ca-cert'] = dest_path + + +class DeploymentCache: + + @property + def dir(self): + return os.path.dirname(self.path) + + def __init__(self, name, cache_dir=None): + self.name = name + if cache_dir is None: + cache_dir = xdg.BaseDirectory.save_data_path('veraison/aws') + self.path = os.path.join(cache_dir, f'{self.name}.db') + self.db = SqliteDict(self.path) + + def get(self, key, default=None): + return self.db.get(key, default) + + def as_dict(self): + return {k: v for k, v in self.db.items()} + + def close(self): + self.db.close() + + def __getitem__(self, key): + return self.db[key] + + def __setitem__(self, key, value): + self.db[key] = value + self.db.commit() + + def __delitem__(self, key): + del self.db[key] + self.db.commit() + + +class StoreIntList(argparse.Action): + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, 
[int(v) for v in values.split(',')]) # pyright: ignore + + +class BaseCommand: + + name = None + desc = None + aliases = [] + + def __init__(self, aws): + self.aws = aws + self.logger = logging.getLogger(self.name) + self.cache = None + + def register(self, subparsers): + parser = subparsers.add_parser(self.name, help=self.desc, aliases=self.aliases) + self.update_arguments(parser) + + def execute(self, args): + if args.verbose: + self.logger.setLevel(logging.DEBUG) + + self.cache = DeploymentCache(args.deployment_name, args.cache_dir) + try: + self.run(args) + finally: + self.cache.close() + + def fail(self, message): + self.logger.error(message) + raise RuntimeError(f'command {self.name} failed.') + + def fail_shell(self, command, exit_code, stdout, stderr): + stdout_file = f'/tmp/{args.deployment_name}-{command}-failure.stdout' + with open(stdout_file, 'w') as wfh: + wfh.write(stdout) + + stderr_file = f'/tmp/{args.deployment_name}-{command}-failure.stderr' + with open(stderr_file, 'w') as wfh: + wfh.write(stderr) + + self.fail(f'{command} failed with {exit_code}' + f'\n\tSTDOUT is in {stdout_file}\n\tSTDERR is in {stderr_file}') + + def update_arguments(self, parser): + pass + + def run(self, *args, **kwargs): + raise NotImplementedError() + + +class CreateStackCommand(BaseCommand): + + name = 'create-stack' + desc = 'create deployment\'s cloudformation stack' + + def update_arguments(self, parser): + parser.add_argument('-a', '--admin-cidr') + parser.add_argument( + '-t', '--template-body', + default=os.path.abspath(os.path.join( + os.path.dirname(__file__), + '../templates/stack-combined.yaml', + )), + ) + parser.add_argument('-k', '--key-name') + parser.add_argument('-i', '--image-id') + parser.add_argument('-s', '--subnet-id') + parser.add_argument('-V', '--vpc-id') + + def run(self, args): + self.logger.info(f'creating stack {args.deployment_name}...') + + # doing this to be compatible with AWS CLI which specifies the template path as + # file://path/to/template. 
+        url = urlparse(args.template_body)
+        self.logger.debug(f'template: {url.path}')
+        with open(url.path) as fh:
+            template = fh.read()
+
+        params = [
+            {'ParameterKey': 'DeploymentName', 'ParameterValue': args.deployment_name},
+            {'ParameterKey': 'KeyName', 'ParameterValue': self._get_key_name(args)},
+            {'ParameterKey': 'InstanceImage', 'ParameterValue': self._get_image(args)},
+            {'ParameterKey': 'VpcId', 'ParameterValue': self._get_vpc_id(args)},
+            {'ParameterKey': 'SubnetId', 'ParameterValue': self._get_subnet_id(args)},
+            {'ParameterKey': 'AdminCidr', 'ParameterValue': self._get_cidr(args)},
+        ]
+
+        self.logger.debug(f'using params {params}')
+        resp = self.aws.cf.create_stack(
+            StackName=args.deployment_name,
+            TemplateBody=template,
+            Parameters=params,
+            OnFailure='DELETE',
+        )
+        self.logger.debug(f'stack ID: {resp["StackId"]}')
+
+        self.logger.debug('waiting for the stack creation to complete...')
+        resp = self.aws.cf.describe_stacks(StackName=args.deployment_name)
+        while resp['Stacks'][0]['StackStatus'] == 'CREATE_IN_PROGRESS':
+            time.sleep(args.wait_period)
+            resp = self.aws.cf.describe_stacks(StackName=args.deployment_name)
+
+        stack_status = resp['Stacks'][0]['StackStatus']
+        if stack_status == 'CREATE_COMPLETE':
+            self.logger.debug(f'getting info for {args.deployment_name}...')
+            deployment_info = get_deployment_info(self.aws, args.deployment_name)
+            instance = deployment_info['instance']
+            self.logger.info(f'instance: {instance["dns_name"]} ({instance["ip_address"]})')
+
+            self.logger.debug('updating cache')
+            instances = self.cache.get('instances', {})
+            instances['combined'] = instance
+            self.cache['instances'] = instances
+
+            self.logger.info('done.')
+        else:  # stack_status != 'CREATE_COMPLETE'
+            self.logger.error(f'creation failed: {stack_status}')
+            resp = self.aws.cf.describe_stack_events(StackName=args.deployment_name)
+
+            for event in resp['StackEvents']:
+                if event['ResourceStatus'] == 'CREATE_IN_PROGRESS':
+                    break
+                status = event['ResourceStatus']
+                reason = event.get('ResourceStatusReason', '')
+                self.logger.error(f'{status} {reason}')
+
+            self.fail(f'could not create stack {args.deployment_name}')
+
+    def _get_key_name(self, args):
+        if args.key_name:
+            return args.key_name
+
+        key_info = self.cache.get('key')
+        if key_info:
+            return key_info['name']
+
+        self.fail('could not find key name (specify with --key-name or run '
+                  'create-key-pair command)')
+
+    def _get_image(self, args):
+        if args.image_id:
+            return args.image_id
+
+        image_id = self.cache.get('images', {}).get(f'{args.deployment_name}-combined')
+        if image_id:
+            return image_id
+
+        self.fail('could not find AMI image ID (specify with --image-id or run '
+                  'create-image command)')
+
+    def _get_cidr(self, args):
+        if args.admin_cidr:
+            return args.admin_cidr
+
+        return f'{get_public_ip_address()}/32'
+
+    def _get_vpc_id(self, args):
+        return command_get_vpc_id(self, args)
+
+    def _get_subnet_id(self, args):
+        return command_get_subnet_id(self, args)
+
+
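+# NOTE: the create/delete status polling loops in the commands above and below
+# could alternatively be written using boto3's built-in CloudFormation waiters;
+# a minimal sketch (not wired in):
+#
+#     waiter = self.aws.cf.get_waiter('stack_create_complete')
+#     waiter.wait(StackName=args.deployment_name,
+#                 WaiterConfig={'Delay': args.wait_period})
+
+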
+class DeleteStackCommand(BaseCommand):
+
+    name = 'delete-stack'
+    desc = 'delete deployment\'s stack'
+
+    def run(self, args):
+        self.logger.info(f'deleting stack {args.deployment_name}...')
+        self.aws.cf.delete_stack(StackName=args.deployment_name)
+
+        try:
+            self.logger.debug('waiting for the stack deletion to complete...')
+            resp = self.aws.cf.describe_stacks(StackName=args.deployment_name)
+            while resp['Stacks'][0]['StackStatus'] == 'DELETE_IN_PROGRESS':
+                time.sleep(args.wait_period)
+                resp = self.aws.cf.describe_stacks(StackName=args.deployment_name)
+        except ClientError as e:
+            if 'does not exist' not in str(e):
+                raise e
+
+        self.logger.debug('updating cache')
+        instances = self.cache.get('instances', {})
+        instances.pop('combined', None)
+        self.cache['instances'] = instances
+
+        self.logger.info('done.')
+
+
+class UpdateSecurityGroups(BaseCommand):
+
+    name = 'update-security-groups'
+    desc = 'update security group(s) in deployment with current host\'s IP address'
+
+    def update_arguments(self, parser):
+        default_tag = f'{socket.gethostname()}-{getpass.getuser()}'
+        parser.add_argument('-t', '--tag', default=default_tag)
+        parser.add_argument('-p', '--ports', action=StoreIntList, default=[8888, 8080, 8088, 22])
+
+    def run(self, args):
+        self.logger.info('updating deployment security groups with IP address for this host')
+
+        try:
+            update_dynamic_address_rules(self.aws, args.deployment_name, args.tag, args.ports)
+        except Exception as e:
+            self.fail(e)
+
+        self.logger.info('done.')
+
+
+class CreateImage(BaseCommand):
+
+    name = 'create-image'
+    desc = 'create the AMI for the Veraison services EC2 instance'
+
+    def update_arguments(self, parser):
+        parser.add_argument('-D', '--deb')
+        parser.add_argument('-s', '--subnet-id')
+        parser.add_argument(
+            '-t', '--template',
+            default=os.path.abspath(os.path.join(
+                os.path.dirname(__file__),
+                '../templates/image-combined.pkr.hcl',
+            )),
+        )
+        parser.add_argument('-T', '--instance-type')
+        parser.add_argument('-V', '--vpc-id')
+
+    def run(self, args):
+        if not shutil.which('packer'):
+            self.fail('packer must be installed on the system')
+
+        if not os.path.isfile(args.template):
+            self.fail(f'template {args.template} does not exist')
+
+        deb_path = args.deb or self.cache['deb']
+        if not os.path.isfile(deb_path):
+            self.fail(f'{deb_path} does not exist')
+        self.cache['deb'] = deb_path
+
+        name = f'{args.deployment_name}-combined'
+        self.logger.info(f'creating image: {name}...')
+
+        self.logger.debug('checking for existing AMI with that name...')
+        existing_id = get_ami_id(self.aws, name)
+        if existing_id:
+            if not args.force:
+                self.fail(f'image {name} already exists (use -f to overwrite)')
+            self.logger.info('removing existing image...')
+            self.aws.ec2.deregister_image(ImageId=existing_id)
+
+        self.logger.info('building using packer...')
+        subnet_id = command_get_subnet_id(self, args)
+        region = command_get_region(self, args, subnet_id)
+        packer_build_args = ' '.join(f'-var {k}={v}' for k, v in {
+            'ami_name': name,
+            'deb': deb_path,
+            'deployment_name': args.deployment_name,
+            'instance_type': args.instance_type,
+            'region': region,
+            'vpc_id': command_get_vpc_id(self, args),
+            'subnet_id': subnet_id,
+        }.items() if v is not None)
+
+        packer_cmd = f'packer build {packer_build_args} {args.template}'
+        self.logger.debug(packer_cmd)
+        exit_code, stdout, stderr = run_in_shell(packer_cmd, args.verbose)
+        if exit_code:
+            self.fail_shell('packer', exit_code, stdout, stderr)
+
+        regex = re.compile(r'AMI: (?P<id>ami-\w+)')
+        match = regex.search(stdout)
+        if not match:
+            self.fail('could not find AMI ID in packer output')
+
+        images = self.cache.get('images', {})
+        images[name] = match.group('id')
+        self.cache['images'] = images
+
+        self.logger.info('done.')
+
+
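+# For reference, the packer command assembled above expands to something like
+# the following (all values below are illustrative):
+#
+#     packer build -var ami_name=veraison-deployment-combined \
+#         -var deb=/path/to/veraison.deb \
+#         -var deployment_name=veraison-deployment \
+#         -var region=eu-west-1 -var vpc_id=vpc-... -var subnet_id=subnet-... \
+#         /path/to/templates/image-combined.pkr.hcl
+
+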
+class DeleteImage(BaseCommand):
+
+    name = 'delete-image'
+    desc = 'delete the AMI for the Veraison services EC2 instance'
+
+    def run(self, args):
+        name = f'{args.deployment_name}-combined'
+        images = self.cache.get('images', {})
+        iid = images.get(name)
+        if iid is None:
+            self.fail(f'no entry for image {name} found in the deployment cache')
+
+        self.logger.info(f'deleting image {name} ({iid})...')
+        self.aws.ec2.deregister_image(ImageId=iid)
+
+        self.logger.debug(f'removing image {name} from cache')
+        del images[name]
+        self.cache['images'] = images
+
+        self.logger.info('done.')
+
+
+class CreateKeyPair(BaseCommand):
+
+    name = 'create-key-pair'
+    desc = 'create a key pair that will be used for SSH access to the deployment\'s instances'
+
+    def update_arguments(self, parser):
+        parser.add_argument('-n', '--key-name')
+        parser.add_argument('-t', '--key-type', choices=['rsa', 'ed25519'], default='rsa')
+
+    def run(self, args):
+        key_info = self.cache.get('key')
+        if key_info:
+            self.fail(f'key pair for {args.deployment_name} already exists: '
+                      f'{key_info["name"]} ({key_info["id"]})')
+
+        name = args.key_name or os.getenv('VERAISON_AWS_KEY') or args.deployment_name
+
+        self.logger.info(f'creating key pair {name} for {args.deployment_name}...')
+        resp = self.aws.ec2.create_key_pair(
+            KeyName=name,
+            KeyType=args.key_type,
+            KeyFormat='pem',
+            TagSpecifications=[
+                {
+                    'ResourceType': 'key-pair',
+                    'Tags': [
+                        {'Key': 'veraison-deployment', 'Value': args.deployment_name},
+                    ],
+                }
+            ],
+        )
+
+        path = os.path.join(self.cache.dir, f'{name}_{args.key_type}')
+        self.logger.info(f'writing private key to {path}')
+        with open(path, 'w') as wfh:
+            wfh.write(resp['KeyMaterial'])
+        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
+
+        self.cache['key'] = {
+            'name': name,
+            'id': resp['KeyPairId'],
+            'fingerprint': resp['KeyFingerprint'],
+            'path': path,
+        }
+
+        self.logger.info('done.')
+
+
+class DeleteKeyPair(BaseCommand):
+
+    name = 'delete-key-pair'
+    desc = 'delete the key pair that is used for SSH access to the deployment\'s instances'
+
+    def run(self, args):
+        self.logger.info(f'deleting key pair for {args.deployment_name}...')
+        key_info = self.cache.get('key')
+        if key_info:
+            if os.path.isfile(key_info['path']):
+                self.logger.debug(f'deleting {key_info["path"]}')
+                os.remove(key_info['path'])
+            else:
+                self.logger.debug(f'{key_info["path"]} not found (already deleted?)')
+            self.logger.debug(f'deleting AWS key pair {key_info["name"]} ({key_info["id"]})')
+            self.aws.ec2.delete_key_pair(KeyPairId=key_info['id'])
+            del self.cache['key']
+            self.logger.info('done.')
+        else:
+            self.logger.debug('no key info cached; checking VERAISON_AWS_KEY')
+            key_name = os.getenv('VERAISON_AWS_KEY')
+            if key_name:
+                self.logger.debug(f'deleting AWS key pair {key_name}')
+                self.aws.ec2.delete_key_pair(KeyName=key_name)
+            else:
+                self.logger.debug('VERAISON_AWS_KEY not specified; searching for key '
+                                  f'tagged with {args.deployment_name}')
+                resp = self.aws.ec2.describe_key_pairs(
+                    Filters=[{
+                        'Name': 'tag:veraison-deployment',
+                        'Values': [
+                            args.deployment_name,
+                        ],
+                    }],
+                )
+
+                if len(resp['KeyPairs']) == 1:
+                    name = resp['KeyPairs'][0]['KeyName']
+                    kid = resp['KeyPairs'][0]['KeyPairId']
+                    self.logger.debug(f'deleting AWS key pair {name} ({kid})')
+                    self.aws.ec2.delete_key_pair(KeyPairId=kid)
+                else:
+                    if len(resp['KeyPairs']) > 1:
+                        names = ', '.join([kp['KeyName'] for kp in resp['KeyPairs']])
+                        self.logger.error(f'multiple key pairs for {args.deployment_name} found '
+                                          f'({names}). Specify key name using VERAISON_AWS_KEY')
+                    else:
+                        self.logger.error(f'no key pairs found for {args.deployment_name}')
+
+                    self.fail(f'could not delete key pair for {args.deployment_name}')
+
+            self.logger.info('done. (local files not touched)')
+
+
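+# The deployment cache that the commands above read and write is a plain
+# SqliteDict, so it can also be inspected outside this script; a minimal
+# sketch (the path shown assumes the default XDG data directory and the
+# default deployment name):
+#
+#     from sqlitedict import SqliteDict
+#     path = os.path.expanduser('~/.local/share/veraison/aws/veraison-deployment.db')
+#     with SqliteDict(path) as db:
+#         print(db.get('key'), db.get('images'))
+
+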
+class CreateDeb(BaseCommand):
+
+    name = 'create-deb'
+    desc = 'create the Veraison Debian package'
+
+    def update_arguments(self, parser):
+        parser.add_argument(
+            '-s', '--veraison-src',
+            help='path to Veraison services source; if not specified, '
+                 'it will be guessed based on this script\'s location',
+        )
+        parser.add_argument(
+            '-w', '--work-dir', default='/tmp',
+            help='this will be used as the working directory when creating the .deb. '
+                 'Upon completion, it will contain the intermediate artifacts.',
+        )
+
+    def run(self, args):
+        src_root = args.veraison_src
+        if src_root is None:
+            src_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..'))
+
+        script = os.path.join(src_root, 'deployments/debian/deployment.sh')
+        if not os.path.isfile(script):
+            self.fail(f'script {script} does not exist')
+
+        self.logger.info(f'creating Debian package under {args.work_dir}...')
+        create_deb_cmd = f'{script} create-deb {args.work_dir}'
+        self.logger.debug(create_deb_cmd)
+        exit_code, stdout, stderr = run_in_shell(create_deb_cmd, args.verbose)
+        if exit_code:
+            self.fail_shell('deb creation', exit_code, stdout, stderr)
+
+        regex = re.compile(r"building package 'veraison' in '(?P<deb_path>[^']+)'")
+        match = regex.search(stdout)
+        if not match:
+            self.fail('could not find deb path in script output')
+
+        deb_path = match.group('deb_path')
+        dest_path = os.path.join(args.cache_dir, os.path.basename(deb_path))
+        self.logger.debug(f'moving {deb_path} to {dest_path}')
+        shutil.move(deb_path, dest_path)
+
+        self.logger.debug('updating deployment cache')
+        self.cache['deb'] = dest_path
+
+        self.logger.info(f'created {dest_path}')
+
+        self.logger.info('extracting ca-cert...')
+        command_update_ca_cert_form_deb(self)
+
+        self.logger.info('done.')
+
+
+class DeleteDeb(BaseCommand):
+
+    name = 'delete-deb'
+    desc = 'delete previously created Debian package'
+
+    def run(self, args):
+        self.logger.info('deleting cached Debian package...')
+
+        deb_path = self.cache.get('deb')
+        if not deb_path:
+            self.fail('could not find deb in cache')
+
+        self.logger.debug(f'removing deb {deb_path}')
+        os.remove(deb_path)
+        del self.cache['deb']
+
+        cert_path = self.cache.get('ca-cert')
+        if cert_path:
+            self.logger.debug(f'removing ca-cert {cert_path}')
+            os.remove(cert_path)
+            del self.cache['ca-cert']
+
+        self.logger.info('done.')
+
+
+class Shell(BaseCommand):
+
+    name = 'shell'
+    desc = 'start a shell on a deployment instance'
+
+    def update_arguments(self, parser):
+        parser.add_argument('-k', '--ssh-key')
+        parser.add_argument('-i', '--instance', default='combined', choices=['combined'])
+        parser.add_argument('-u', '--user', default='ubuntu')
+        parser.add_argument('-s', '--server-alive-interval', type=int, default=60)
+
+    def run(self, args):
+        if not shutil.which('ssh'):
+            self.fail('ssh does not appear to be installed on the system')
+
+        instance = self.cache.get('instances', {}).get(args.instance)
+        if not instance:
+            self.fail(f'{args.instance} instance not in cache; has the correct stack been created?')
+
+        if args.ssh_key:
+            key = args.ssh_key
+        else:
+            key = self.cache.get('key', {}).get('path')
+
+        if not key:
+            self.fail('key not found in cache; specify with --ssh-key')
+
+        ssh_opts = '-o StrictHostKeyChecking=no'
+        if args.server_alive_interval:
+            ssh_opts += f' -o ServerAliveInterval={args.server_alive_interval}'
+
+        ssh_cmd = f'ssh {ssh_opts} -i {key} {args.user}@{instance["dns_name"]}'
+        self.logger.debug(ssh_cmd)
+        os.system(ssh_cmd)
+
+
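+# Example (illustrative): open a shell on the deployment's combined instance
+# using the cached key, with a shorter keep-alive interval:
+#
+#     ./bin/veraison -d my-deployment shell --server-alive-interval 30
+
+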
+class UpdateCerts(BaseCommand):
+
+    name = 'update-certs'
+    desc = 'update SSL certs on the combined instance to reflect its DNS name'
+
+    def run(self, args):
+        with command_connect(self, 'combined') as con:
+            self.logger.info(f'updating certs for {con.host}...')
+
+            cmd = (
+                f'/opt/veraison/bin/veraison -f gen-service-certs {con.host},localhost '
+                '/opt/veraison/certs/rootCA.{crt,key}'
+            )
+            self.logger.debug(cmd)
+            # pty=True combines stdout and stderr
+            res = con.sudo(cmd, echo=False, hide=True, pty=True)
+            for line in res.stdout.split('\n'):
+                if line:
+                    self.logger.debug(line)
+
+            self.logger.debug('restarting veraison services')
+            con.sudo('/opt/veraison/bin/veraison -s stop-services',
+                     echo=False, hide=True, pty=True)
+            con.sudo('/opt/veraison/bin/veraison -s start-services',
+                     echo=False, hide=True, pty=True)
+
+        self.logger.info('done.')
+
+
+class CreateClientConfig(BaseCommand):
+
+    name = 'create-client-config'
+    desc = '''
+    create configuration for Veraison clients to access the deployment
+    '''
+    all_clients = ['cocli', 'evcli', 'pocli']
+
+    def update_arguments(self, parser):
+        parser.add_argument('-c', '--client', action='append', choices=self.all_clients)
+        parser.add_argument('-o', '--output-dir', default=xdg.BaseDirectory.xdg_config_home)
+
+    def run(self, args):
+        self.logger.info('creating Veraison client config(s)...')
+        cert_path = self.cache.get('ca-cert')
+        if not cert_path:
+            self.fail('could not find ca-cert in cache')
+
+        with command_connect(self, 'combined') as con:
+            self.logger.debug(f'getting services config from {con.host}...')
+            res = con.run(
+                'cat /opt/veraison/config/services/config.yaml',
+                echo=False, hide=True,
+            )
+            if res.exited != 0:
+                self.fail(f'could not read services config; got {res.exited}: {res.stderr}')
+
+            clients = args.client or self.all_clients
+            for client in clients:
+                self.logger.info(f'generating {client} config...')
+                outdir = os.path.join(args.output_dir, client)
+                if not os.path.isdir(outdir):
+                    self.logger.debug(f'creating {outdir}')
+                    os.makedirs(outdir)
+
+                srv_cfg = yaml.safe_load(res.stdout)
+                config = getattr(self, f'get_{client}_config')(srv_cfg, con.host, cert_path)
+
+                outfile = os.path.join(outdir, 'config.yaml')
+                self.logger.debug(f'writing {outfile}')
+                with open(outfile, 'w') as wfh:
+                    yaml.dump(config, wfh)
+
+        self.cache['client_config_dir'] = args.output_dir
+        self.logger.info('done.')
+
+    def get_cocli_config(self, srv_cfg, host, cert_path):
+        port = int(srv_cfg['provisioning']['listen-addr'].split(':')[1])
+        return {
+            'ca_cert': cert_path,
+            'api_server': f'https://{host}:{port}/endorsement-provisioning/v1/submit',
+        }
+
+    def get_evcli_config(self, srv_cfg, host, cert_path):
+        port = int(srv_cfg['verification']['listen-addr'].split(':')[1])
+        return {
+            'ca_cert': cert_path,
+            'api_server': f'https://{host}:{port}/challenge-response/v1/newSession',
+        }
+
+    def get_pocli_config(self, srv_cfg, host, cert_path):
+        port = int(srv_cfg['management']['listen-addr'].split(':')[1])
+        return {
+            'ca_cert': cert_path,
+            'tls': True,
+            'host': host,
+            'port': port,
+        }
+
+
+class Cache(BaseCommand):
+
+    name = 'cache'
+    desc = 'show cached info for the deployment'
+
+    def run(self, args):
+        print(f'deployment: {args.deployment_name}')
+        pprint.pp(self.cache.as_dict())
+
+
+class CheckStores(BaseCommand):
+    name = 'check-stores'
+    desc = 'output the contents of deployment\'s sqlite3 stores'
+    aliases = ['stores']
+
+    def run(self, args):
+        with command_connect(self, 'combined') as con:
+            res = con.run('/opt/veraison/bin/veraison check-stores',
+ echo=False, hide=True, pty=True) + if res.exited != 0: + self.fail(f'could not read stores; got {res.exited}: {res.stdout}') + print(res.stdout.rstrip('\n')) + + +class Status(BaseCommand): + + name = 'status' + desc = 'show status of the deployment' + + def run(self, args): + print(f'deployment: {args.deployment_name}') + vpc_id = self.cache.get('vpc_id', f'{COLOR_DARK_GREY}none{COLOR_RESET}') + print(f' vpc: {vpc_id}') + subnet_id = self.cache.get('subnet_id', f'{COLOR_DARK_GREY}none{COLOR_RESET}') + print(f' subnet: {subnet_id}') + + instance = self.cache.get('instances', {}).get('combined') + if not instance: + print(f' instance: {COLOR_DARK_GREY}not created{COLOR_RESET}') + return + + host = instance['dns_name'] + addr = instance['ip_address'] + + try: + with command_connect(self, 'combined') as con: + res = con.run('/opt/veraison/bin/veraison -s status', echo=False, hide=True) + print(f' instance: {host} ({addr}) {COLOR_GREEN}up{COLOR_RESET}') + print(f' services:') + print(res.stdout.rstrip('\n')) + except Exception as e: + self.logger.debug(f'error connecting to instance: {e}') + print(f' instance: {host} ({addr}) {COLOR_RED}down{COLOR_RESET}') + + +class ClearStores(BaseCommand): + name = 'clear-stores' + desc = 'clear the contents of deployment\'s sqlite3 stores' + + def run(self, args): + with command_connect(self, 'combined') as con: + res = con.sudo('/opt/veraison/bin/veraison clear-stores', + user='veraison', echo=False, hide=True, pty=True) + if res.exited != 0: + self.fail(f'could not clear stores; got {res.exited}: {res.stdout}') + + +class LogFormatter(logging.Formatter): + + fmt = f'{{}}%(asctime)s %(name)s %(levelname)s{COLOR_RESET}: %(message)s' + + level_formats = { + logging.DEBUG: fmt.format(COLOR_DARK_GREY), + logging.INFO: fmt.format(COLOR_GREY), + logging.WARNING: fmt.format(COLOR_YELLOW), + logging.ERROR: fmt.format(COLOR_RED), + logging.CRITICAL: fmt.format(COLOR_BOLD_RED), + } + + def format(self, record): + log_fmt = self.level_formats.get(record.levelno) + formatter = logging.Formatter(log_fmt) + return formatter.format(record) + + +if __name__ == '__main__': + handler = logging.StreamHandler() + handler.setLevel(logging.DEBUG) + handler.setFormatter(LogFormatter()) + logging.basicConfig(level=logging.INFO, handlers=[handler]) + logging.getLogger('botocore').setLevel(logging.WARNING) + logging.getLogger("paramiko").setLevel(logging.WARNING) + + aws = Aws( + aws_access_key_id=os.getenv('AWS_ACCESS_KEY'), + aws_secret_access_key=os.getenv('AWS_SECRET_KEY'), + aws_session_token=os.getenv('AWS_SESSION_TOKEN'), + profile_name=os.getenv('AWS_PROFILE'), + ) + + cmd_map = {} + for name, cmd_cls in inspect.getmembers( + sys.modules[__name__], + lambda x: inspect.isclass(x) and issubclass(x, BaseCommand) and x is not BaseCommand): + if not name[0].isupper(): + continue # ignore variable bindings + assert cmd_cls.name, f'{cmd_cls} does not define a name' + cmd = cmd_cls(aws) + assert cmd.name not in cmd_map, f'duplicate name {cmd.name}' + cmd_map[cmd.name] = cmd + for alias in cmd.aliases: + assert alias not in cmd_map, f'duplicate alias {alias}' + cmd_map[alias] = cmd + + parser = argparse.ArgumentParser() + parser.add_argument('-d', '--deployment-name', default='veraison-deployment') + parser.add_argument('-f', '--force', action='store_true') + parser.add_argument('-W', '--wait-period', type=int, default=1) + parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument( + '--cache-dir', default=xdg.BaseDirectory.save_data_path('veraison/aws'), 
+    )
+
+    subparsers = parser.add_subparsers(dest='command', required=True)
+    for name, command in cmd_map.items():
+        if name == command.name:
+            command.register(subparsers)
+
+    args = parser.parse_args()
+    cmd = cmd_map[args.command]
+    try:
+        cmd.execute(args)
+    except Exception as e:
+        cmd.logger.critical(f'{e.__class__.__name__}: {e}')
diff --git a/deployments/aws/deployment.cfg b/deployments/aws/deployment.cfg
new file mode 100644
index 00000000..5a5dd316
--- /dev/null
+++ b/deployments/aws/deployment.cfg
@@ -0,0 +1,41 @@
+##############################################################################
+# Veraison Deployment Configuration
+#
+# Note: this uses Bash syntax; however, there is no need to export variables
+# here, as this file will be sourced with set -a
+#
+# Note: in addition to the settings here, you need to configure access to your
+# AWS account. Please see:
+#
+#    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
+#
+##############################################################################
+# shellcheck disable=SC2034
+
+# The ports on which services will be listening.
+VTS_PORT=${VTS_PORT:-50051}
+PROVISIONING_PORT=${PROVISIONING_PORT:-8888}
+VERIFICATION_PORT=${VERIFICATION_PORT:-8080}
+MANAGEMENT_PORT=${MANAGEMENT_PORT:-8088}
+
+# The location of the Python venv that will be used to run the deployment
+# script. This venv must have the appropriate dependencies installed (see
+# misc/requirements.txt).
+VERAISON_AWS_VENV=${VERAISON_AWS_VENV:-~/venv/aws}
+
+# The name of the deployment. This will be used to name the CloudFormation
+# stack. Additionally, all created resources will have a tag with key
+# "veraison-deployment" and this value.
+VERAISON_AWS_DEPLOYMENT=${VERAISON_AWS_DEPLOYMENT:-veraison-deployment}
+
+# ID of the VPC into which the deployment will be created. This must exist in
+# the account.
+VERAISON_AWS_VPC_ID=${VERAISON_AWS_VPC_ID:-}
+
+# ID of the subnet inside which the deployment instances will run. This must
+# exist in the VPC.
+VERAISON_AWS_SUBNET_ID=${VERAISON_AWS_SUBNET_ID:-}
+
+# Instances' security groups will be configured to allow connections from this
+# CIDR.
+VERAISON_AWS_ADMIN_CIDR=${VERAISON_AWS_ADMIN_CIDR:-217.140.96.0/20}
diff --git a/deployments/aws/deployment.sh b/deployments/aws/deployment.sh
new file mode 100755
index 00000000..16021087
--- /dev/null
+++ b/deployments/aws/deployment.sh
@@ -0,0 +1,222 @@
+#!/bin/bash
+set -ueo pipefail
+
+_error='\e[0;31mERROR\e[0m'
+_this_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+_repo_root=$(realpath "${_this_dir}/../..")
+
+set -a
+source "${_this_dir}/deployment.cfg"
+set +a
+
+_script=${_this_dir}/bin/veraison
+
+function help() {
+	set +e
+	local usage
+	read -r -d '' usage <<-EOF
+	Usage: deployment.sh [OPTIONS...] COMMAND [ARGS...]
+
+	This script allows deploying Veraison to AWS
+	XXX TODO XXX
+
+	OPTIONS:
+
+	Please note that options MUST be specified before the command and arguments.
+
+	  -h  show this message and exit
+
+	COMMANDS:
+
+	help
+	    Show this message and exit. The same as the -h option.
+
+	bootstrap
+	    Initialize the Python venv that will be used to run the deployment script.
+
+	EOF
+	set -e
+
+	echo "$usage"
+}
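+
+# Typical lifecycle (illustrative):
+#
+#   ./deployment.sh bootstrap   # install host dependencies and the Python venv
+#   ./deployment.sh bringup     # deb -> key pair -> AMI -> stack -> certs
+#   ./deployment.sh teardown    # delete the stack, AMI, key pair, and deb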
+
+function bootstrap() {
+	"${_repo_root}/deployments/debian/deployment.sh" bootstrap
+
+	case $( uname -s ) in
+		Linux)
+			# shellcheck disable=SC2002
+			local distrib_id
+			distrib_id=$(head -n 1 < /etc/lsb-release 2> /dev/null | \
+				cut -f2 -d= | tr -d \")
+
+			case $distrib_id in
+				Arch) sudo pacman -Syy packer openssh;;
+				Ubuntu)
+					sudo apt --yes install curl
+					curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
+					sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
+					sudo apt update && sudo apt --yes install packer
+					;;
+				*)
+					echo -e "$_error: Bootstrapping is currently only supported for Arch and Ubuntu."
+					exit 1
+					;;
+			esac
+			;;
+		Darwin)
+			if ! type brew > /dev/null; then
+				echo -e "$_error: homebrew (https://brew.sh) must be installed."
+				exit 1
+			fi
+			brew install packer
+			;;
+		*)
+			echo -e "$_error: Bootstrapping is currently only supported for Arch, Ubuntu, and macOS (via homebrew)."
+			exit 1
+			;;
+	esac
+
+	python -m venv "${VERAISON_AWS_VENV}"
+	# shellcheck disable=SC1091
+	source "${VERAISON_AWS_VENV}/bin/activate"
+	pip install -r "${_this_dir}/misc/requirements.txt"
+
+	set +e
+	local message
+	read -r -d '' message <<-EOF
+
+	Environment for AWS deployment has been bootstrapped. To activate it:
+
+	    source ${_this_dir}/env/env.bash
+
+	EOF
+	set -e
+
+	echo "$message"
+}
+
+function create_image() {
+	_check_installed packer
+
+	veraison create-image --vpc-id "${VERAISON_AWS_VPC_ID}" \
+		--subnet-id "${VERAISON_AWS_SUBNET_ID}"
+}
+
+function delete_image() {
+	veraison delete-image
+}
+
+function create_key_pair() {
+	veraison create-key-pair
+}
+
+function delete_key_pair() {
+	veraison delete-key-pair
+}
+
+function create_deb() {
+	veraison create-deb
+}
+
+function cache() {
+	veraison cache
+}
+
+function create_stack() {
+	veraison create-stack --vpc-id "${VERAISON_AWS_VPC_ID}" \
+		--subnet-id "${VERAISON_AWS_SUBNET_ID}" \
+		--admin-cidr "${VERAISON_AWS_ADMIN_CIDR}"
+}
+
+function delete_stack() {
+	veraison delete-stack
+}
+
+function update_security_groups() {
+	veraison update-security-groups
+}
+
+function update_certs() {
+	veraison update-certs
+}
+
+function bringup() {
+	_check_installed packer
+
+	veraison create-deb
+	veraison create-key-pair
+	veraison create-image --vpc-id "${VERAISON_AWS_VPC_ID}" \
+		--subnet-id "${VERAISON_AWS_SUBNET_ID}"
+	veraison create-stack --vpc-id "${VERAISON_AWS_VPC_ID}" \
+		--subnet-id "${VERAISON_AWS_SUBNET_ID}" \
+		--admin-cidr "${VERAISON_AWS_ADMIN_CIDR}"
+	veraison update-security-groups
+	veraison update-certs
+}
+
+function teardown() {
+	veraison delete-stack
+	veraison delete-image
+	veraison delete-key-pair
+	veraison delete-deb
+}
+
+function veraison() {
+	"${_script}" "${_script_flags[@]}" "${@}"
+}
+
+function _check_installed() {
+	local what=$1
+
+	if [[ "$(type -p "$what")" == "" ]]; then
+		echo -e "$_error: $what executable must be installed to use this command."
+ exit 1 + fi +} + +_force=false +_verbose=false + +while getopts "hfv" opt; do + case "$opt" in + h) help; exit 0;; + f) _force=true;; + v) _verbose=true;; + *) break;; + esac +done + +shift $((OPTIND-1)) +[ "${1:-}" = "--" ] && shift + +_script_flags=(--deployment-name "${VERAISON_AWS_DEPLOYMENT}") +if [[ $_force == true ]]; then + _script_flags+=(--force) +fi +if [[ $_verbose == true ]]; then + _script_flags+=(--verbose) +fi + +_check_installed python + +_command=$1; shift +_command=$(echo "$_command" | tr -- _ -) +case $_command in + help) help;; + bootstrap) bootstrap;; + bringup) bringup;; + teardown) teardown;; + create-image) create_image;; + delete-image) delete_image;; + create-key-pair | create-key) create_key_pair;; + delete-key-pair | delete-key) delete_key_pair;; + create-stack) create_stack;; + delete-stack) delete_stack;; + create-deb) create_deb;; + update-security-groups) update_security_groups;; + update-certs) update_certs;; + cache) cache;; + *) echo -e "$_error: unexpected command: \"$_command\"";; +esac +# vim: set noet sts=8 sw=8: diff --git a/deployments/aws/env/env.bash b/deployments/aws/env/env.bash new file mode 100644 index 00000000..e562901e --- /dev/null +++ b/deployments/aws/env/env.bash @@ -0,0 +1,13 @@ +_this_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +_deployment_root="${_this_dir}/.." +_deployment_cfg="${_deployment_root}/deployment.cfg" + +set -a +# shellcheck source=../deployment.cfg +source "${_deployment_cfg}" +set +a + +# shellcheck disable=SC1091 +source "${VERAISON_AWS_VENV}/bin/activate" + +export PATH="${_deployment_root}/bin":${PATH} diff --git a/deployments/aws/misc/requirements.txt b/deployments/aws/misc/requirements.txt new file mode 100644 index 00000000..9032808c --- /dev/null +++ b/deployments/aws/misc/requirements.txt @@ -0,0 +1,8 @@ +ar==1.0.0 +boto3==1.35.8 +botocore==1.35.8 +fabric==3.2.2 +pyxdg==0.28 +PyYAML==6.0.2 +requests==2.32.3 +sqlitedict==2.1.0 diff --git a/deployments/aws/templates/image-combined.pkr.hcl b/deployments/aws/templates/image-combined.pkr.hcl new file mode 100644 index 00000000..94c63eec --- /dev/null +++ b/deployments/aws/templates/image-combined.pkr.hcl @@ -0,0 +1,88 @@ +packer { + required_plugins { + amazon = { + version = ">= 1.2.8" + source = "github.com/hashicorp/amazon" + } + } +} + +variable "deployment_name" { + type = string +} + +variable "ami_name" { + type = string +} + +variable "vpc_id" { + type = string +} + +variable "region" { + type = string + default = "eu-west-1" +} + +variable "instance_type" { + type = string + default = "t2.micro" +} + +variable "subnet_id" { + type = string +} + +variable "deb" { + type = string +} + +locals { + dest_deb = "/tmp/${basename(var.deb)}" +} + +source "amazon-ebs" "ubuntu" { + ami_name = "${var.ami_name}" + instance_type = "${var.instance_type}" + region = "${var.region}" + vpc_id = "${var.vpc_id}" + subnet_id = "${var.subnet_id}" + associate_public_ip_address = true + tags = { + veraison-deployment = "${var.deployment_name}" + } + source_ami_filter { + filters = { + name = "ubuntu/images/*ubuntu-jammy-22.04-amd64-server-*" + root-device-type = "ebs" + virtualization-type = "hvm" + architecture = "x86_64" + } + owners = ["099720109477"] # amazon + most_recent = true + } + ssh_username = "ubuntu" +} + +build { + name = "veraison-combined" + sources = [ + "source.amazon-ebs.ubuntu" + ] + + provisioner "file" { + source = "${var.deb}" + destination = "${local.dest_deb}" + } + + provisioner "shell" { + inline = [ + "sudo dpkg -i 
${local.dest_deb} 2>&1",
+      "sudo apt-get update",
+      "sudo apt-get install --yes sqlite3 jq 2>&1",
+      "echo \"\nsource /opt/veraison/env/env.bash\" >> ~/.bashrc"
+    ]
+  }
+}
+
+# vim: set et sts=2 sw=2
diff --git a/deployments/aws/templates/stack-combined.yaml b/deployments/aws/templates/stack-combined.yaml
new file mode 100644
index 00000000..0e625d55
--- /dev/null
+++ b/deployments/aws/templates/stack-combined.yaml
@@ -0,0 +1,102 @@
+# Veraison stack
+AWSTemplateFormatVersion: 2010-09-09
+Description: Veraison attestation verification services
+
+Parameters:
+  # mandatory parameters (no defaults):
+  DeploymentName:
+    Type: String
+    Description: |
+      The name of this deployment. Defaults to the CloudFormation stack name
+  VpcId:
+    Description: ID for the VPC into which Veraison will be deployed
+    Type: AWS::EC2::VPC::Id
+  KeyName:
+    Type: AWS::EC2::KeyPair::KeyName
+    Description: |
+      The name of an EC2 key pair that will be used to provide access to the
+      instance.
+  InstanceImage:
+    Type: String
+    Description: ID of the AMI to be used for the instance.
+  AdminCidr:
+    Type: String
+    Description: CIDR used to configure remote access
+  SubnetId:
+    Type: String
+    Description: ID of the subnet to be used for the Veraison deployment
+
+  # optional parameters (these have defaults if not specified):
+  ProvisioningPort:
+    Type: Number
+    Description: TCP port on which the provisioning service is listening
+    Default: 8888
+  VerificationPort:
+    Type: Number
+    Description: TCP port on which the verification service is listening
+    Default: 8080
+  ManagementPort:
+    Type: Number
+    Description: TCP port on which the management service is listening
+    Default: 8088
+  ServiceInstanceType:
+    Description: The EC2 instance type that will be used to run the Veraison instance
+    Type: String
+    Default: t2.micro
+    # TODO(setrofm): technically, there is a set of AllowedValues that should
+    # be specified here (i.e. a valid type name), but since there is a ton of
+    # them, and right now I'm not sure which subset of those would even make
+    # sense for a service instance, I'm leaving this unconstrained for now.
+
+
+Resources:
+
+  VeraisonSecurityGroup:
+    Type: AWS::EC2::SecurityGroup
+    Properties:
+      VpcId: !Ref VpcId
+      GroupName: veraison-services
+      GroupDescription: Veraison services access
+      SecurityGroupIngress:
+        - IpProtocol: tcp
+          FromPort: 22
+          ToPort: 22
+          CidrIp: !Ref AdminCidr
+        - IpProtocol: tcp
+          FromPort: !Ref ProvisioningPort
+          ToPort: !Ref ProvisioningPort
+          CidrIp: !Ref AdminCidr
+        - IpProtocol: tcp
+          FromPort: !Ref VerificationPort
+          ToPort: !Ref VerificationPort
+          CidrIp: !Ref AdminCidr
+        - IpProtocol: tcp
+          FromPort: !Ref ManagementPort
+          ToPort: !Ref ManagementPort
+          CidrIp: !Ref AdminCidr
+      Tags:
+        - Key: veraison-deployment
+          Value: !Ref DeploymentName
+
+  VeraisonInstance:
+    Type: AWS::EC2::Instance
+    Properties:
+      KeyName: !Ref KeyName
+      InstanceType: !Ref ServiceInstanceType
+      ImageId: !Ref InstanceImage
+      SubnetId: !Ref SubnetId
+      SecurityGroupIds:
+        - !GetAtt VeraisonSecurityGroup.GroupId
+      Tags:
+        - Key: veraison-deployment
+          Value: !Ref DeploymentName
+
+  VeraisonIpAddress:
+    Type: AWS::EC2::EIP
+    DependsOn: VeraisonInstance
+    Properties:
+      Domain: vpc
+      InstanceId: !Ref VeraisonInstance
+      Tags:
+        - Key: veraison-deployment
+          Value: !Ref DeploymentName
diff --git a/end-to-end/end-to-end-aws b/end-to-end/end-to-end-aws
new file mode 100755
index 00000000..bd4557a7
--- /dev/null
+++ b/end-to-end/end-to-end-aws
@@ -0,0 +1,153 @@
+#!/bin/bash
+# Copyright 2024 Contributors to the Veraison project.
+# SPDX-License-Identifier: Apache-2.0
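+#
+# Exercises a Veraison AWS deployment end-to-end. Assumes `deployment.sh
+# bringup` has completed, and that cocli and evcli are installed on PATH.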
+
+SCHEME=${SCHEME:-psa}
+CONFIG_DIR=/tmp/veraison-aws-client-configs
+
+THIS_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+source "${THIS_DIR}/../deployments/aws/env/env.bash"
+
+if [[ ! -d "$CONFIG_DIR" ]]; then
+    veraison --deployment-name "${VERAISON_AWS_DEPLOYMENT}" create-client-config --output-dir "$CONFIG_DIR"
+fi
+
+function provision() {
+    case $SCHEME in
+        psa)
+            local corim_file=$THIS_DIR/input/psa-endorsements.cbor
+            local media_type="application/corim-unsigned+cbor; profile=\"http://arm.com/psa/iot/1\""
+            ;;
+        cca)
+            local corim_file=$THIS_DIR/input/cca-endorsements.cbor
+            local media_type="application/corim-unsigned+cbor; profile=\"http://arm.com/cca/ssd/1\""
+            ;;
+        *)
+            echo -e "${_error}: bad SCHEME: $SCHEME"
+            exit 1
+            ;;
+    esac
+
+    set -x
+    cocli corim submit --config="${CONFIG_DIR}/cocli/config.yaml" \
+        --corim-file="$corim_file" --media-type="$media_type"
+}
+
+function check() {
+    veraison --deployment-name "${VERAISON_AWS_DEPLOYMENT}" stores
+}
+
+function verify_as_attester() {
+    case $SCHEME in
+        psa)
+            local claims="$THIS_DIR/input/psa-claims-profile-2-without-nonce.json"
+            local key_args="--key=$THIS_DIR/input/ec-p256.jwk"
+            local nonce_args="--nonce-size=32"
+            ;;
+        cca)
+            local claims="$THIS_DIR/input/cca-claims-without-realm-challenge.json"
+            local key_args="--iak=$THIS_DIR/input/ec256.json --rak=$THIS_DIR/input/ec384.json"
+            local nonce_args=""
+            ;;
+        *)
+            echo -e "${_error}: bad SCHEME: $SCHEME"
+            exit 1
+            ;;
+    esac
+
+    set -x
+    evcli "$SCHEME" verify-as attester --config="${CONFIG_DIR}/evcli/config.yaml" \
+        --claims="$claims" $key_args $nonce_args
+}
+
+function verify_as_relying_party() {
+    case $SCHEME in
+        psa)
+            local token=$THIS_DIR/input/psa-evidence.cbor
+            ;;
+        cca)
+            local token=$THIS_DIR/input/cca-evidence.cbor
+            ;;
+        *)
+            echo -e "${_error}: bad SCHEME: $SCHEME"
+            exit 1
+            ;;
+    esac
+
+    set -x
+    evcli "$SCHEME" verify-as relying-party --config="${CONFIG_DIR}/evcli/config.yaml" \
+        --token="$token"
+}
+
+function verify() {
+    as=$1
+
+    case $as in
+        attester | attest) verify_as_attester;;
+        rp | relying-party) verify_as_relying_party;;
+        *) echo -e "${_error}: unexpected \"as\": \"$as\"; should be either \"attester\" or \"rp\""
+    esac
+}
+
+function help() {
+cat <<'EOF'
+Usage: ./end-to-end-aws [command]
+
+Commands:
+
+    provision
+        Provision endorsements and trust anchors via the provisioning API.
+    check | check-stores
+        Check the contents of the endorsement and trust anchor stores.
+    verify [ attester | rp ]
+        Verify claims via the verification API, either as an attester or as
+        the relying party.
+
+EOF
+}
+
+function _check_scheme() {
+    case $SCHEME in
+        psa | cca)
+            ;;
+        *)
+            echo -e "${_error}: unknown SCHEME: '$SCHEME'; must be 'cca' or 'psa'"; exit 1
+            ;;
+    esac
+}
+
+function _check_installed() {
+    local what=$1
+
+    if [[ "$(type -p "$what")" == "" ]]; then
+        echo -e "$_error: $what executable must be installed to use this command."
+        exit 1
+    fi
+}
+
+_error='\e[0;31mERROR\e[0m'
+
+while getopts "hs:" opt; do
+    case "$opt" in
+        h) help; exit 0;;
+        s) SCHEME="$OPTARG";;
+        *) break;;
+    esac
+done
+
+shift $((OPTIND-1))
+[ "${1:-}" = "--" ] && shift
+
+_check_scheme
+_check_installed cocli
+_check_installed evcli
+
+command=$1
+case $command in
+    help) help;;
+    provision) provision;;
+    check | check-stores) check;;
+    verify) verify "$2";;
+    *) echo -e "${_error}: unexpected command: \"$command\""; help;;
+esac
+# vim: set et sts=4 sw=4
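+# Example (illustrative):
+#   SCHEME=cca ./end-to-end-aws provision
+#   ./end-to-end-aws verify rp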