diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 260b76727..1e1e3b259 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,11 +1,17 @@ ## :memo: Summary -This PR closes/completes/contributes to issue #ANPL-... - +This PR resolves ... + This PR ... - @@ -27,4 +33,4 @@ Merging this PR will have the following side-effects: - [ ] No changes to the documentation are required - [ ] This PR includes all relevant documentation -- [ ] Documentation will be added in the future because ... (see #ANPL-...) +- [ ] Documentation will be added in the future because ... (see issue #...) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a59fb520b..eedbd2c4f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,12 +29,3 @@ repos: - id: flake8 name: flake8 format check entry: bash -c 'flake8 --config=.flake8 $(git diff --name-only --cached --diff-filter=ACMR | grep .py)' - - - repo: local - hooks: - - id: jira-ticket - name: Check for Jira ticket - language: pygrep - entry: '\A(?!ANPL+-[0-9]+)' - args: [--multiline] - stages: [commit-msg] diff --git a/Dockerfile b/Dockerfile index e0851cc5a..42f6b1fb0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,6 +29,11 @@ RUN apt-get update \ && apt-get install -y --no-install-recommends \ postgresql-client \ wget \ + gcc \ + libcurl4-gnutls-dev \ + python3-dev \ + libgnutls28-dev \ + libssl-dev \ && rm -rf /var/lib/apt/lists/* WORKDIR /home/controlpanel diff --git a/controlpanel/__init__.py b/controlpanel/__init__.py index e69de29bb..15d7c5085 100644 --- a/controlpanel/__init__.py +++ b/controlpanel/__init__.py @@ -0,0 +1,5 @@ +# This will make sure the app is always imported when +# Django starts so that shared_task will use this app. 
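+#
+# The celery app itself lives in controlpanel/celery.py, which is not shown in
+# this diff. A minimal sketch of the standard Django/Celery wiring it assumes
+# (module path and settings namespace are assumptions, not confirmed here):
+#
+#     import os
+#     from celery import Celery
+#
+#     os.environ.setdefault("DJANGO_SETTINGS_MODULE", "controlpanel.settings")
+#     app = Celery("controlpanel")
+#     app.config_from_object("django.conf:settings", namespace="CELERY")
+#     app.autodiscover_tasks()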
+from .celery import app as celery_app + +__all__ = ('celery_app',) diff --git a/controlpanel/api/admin.py b/controlpanel/api/admin.py index c64022b71..22d6bcf05 100644 --- a/controlpanel/api/admin.py +++ b/controlpanel/api/admin.py @@ -24,8 +24,8 @@ class AppAdmin(admin.ModelAdmin): class S3Admin(admin.ModelAdmin): - list_display = ("name", "created_by", "created", "is_data_warehouse") - list_filter = ("created_by", "is_data_warehouse") + list_display = ("name", "created_by", "created", "is_data_warehouse", "is_deleted") + list_filter = ("created_by", "is_data_warehouse", "is_deleted") search_fields = ("name",) diff --git a/controlpanel/api/auth0.py b/controlpanel/api/auth0.py index 54c29e12d..25c16bd1d 100644 --- a/controlpanel/api/auth0.py +++ b/controlpanel/api/auth0.py @@ -47,6 +47,8 @@ class ExtendedAuth0(Auth0): DEFAULT_GRANT_TYPES = ["authorization_code", "client_credentials"] DEFAULT_APP_TYPE = "regular_web" + DEFAULT_CONNECTION_OPTION = 'email' + def __init__(self, **kwargs): self.client_id = kwargs.get("client_id", settings.AUTH0["client_id"]) self.client_secret = kwargs.get( @@ -156,7 +158,7 @@ def setup_auth0_client( } """ if connections is None: - connections = {"email": {}} + connections = {self.DEFAULT_CONNECTION_OPTION: {}} new_connections = self._create_custom_connection(client_name, connections) app_url = "https://{}.{}".format( app_url_name or client_name, app_domain or self.app_domain) @@ -178,7 +180,18 @@ def setup_auth0_client( ) role = self.roles.create(dict(name="app-viewer", applicationId=client_id)) self.roles.add_permission(role, view_app["_id"]) - group = self.groups.create(dict(name=client_name)) + try: + group = self.groups.create(dict(name=client_name)) + except exceptions.Auth0Error as exc: + # celery fails to unpickle original exception, but not 100% sure why. + # Seems to be because __reduce__ method is incorrect? Possible bug. + # https://github.com/celery/celery/issues/6990#issuecomment-1433689294 + # TODO what should happen if group already exists? Raise new error and + # catch in the worker? e.g.: + # raise Auth0Error(detail=exc.message, code=exc.error_code) + # Or get the group ID and continue? 
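+            # The code below takes the second option for now: look up the
+            # existing group's ID and continue, which also keeps this call
+            # safe to re-run if the task message is redelivered.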
+ group = dict(_id=self.groups.get_group_id(client_name)) + self.groups.add_role(group["_id"], role["_id"]) self._enable_connections_for_new_client( @@ -277,7 +290,7 @@ def update_client_auth_connections( so we have to get all social connections, then check whether the client (client_id) is in the list of enabled_clients """ - connections = {"email": {}} if new_conns is None else new_conns + connections = {self.DEFAULT_CONNECTION_OPTION: {}} if new_conns is None else new_conns new_connections = self._create_custom_connection(app_name, connections) # Get the list of removed connections based on the existing connections @@ -512,7 +525,12 @@ def create_custom_connection(self, connection_name: str, input_values: dict()): body = yaml.safe_load(yaml_rendered) or defaultdict(dict) body["options"]["scripts"] = scripts_rendered - self.create(body) + try: + self.create(body) + except exceptions.Auth0Error as error: + # Skip the exception when the connection name existed already + if error.status_code != 409: + raise Auth0Error(error.__str__(), code=error.status_code) return input_values["name"] diff --git a/controlpanel/api/auth0_conns/auth0_nomis/fetchUserProfile.js b/controlpanel/api/auth0_conns/auth0_nomis/fetchUserProfile.js index df80b7688..edd73a7ca 100644 --- a/controlpanel/api/auth0_conns/auth0_nomis/fetchUserProfile.js +++ b/controlpanel/api/auth0_conns/auth0_nomis/fetchUserProfile.js @@ -1,37 +1,14 @@ -function(accessToken, ctx, cb) { - var base_url = "{{gateway_url}}"; - var user_endpoint = "/auth/api/user/me"; - var user_profile_url = base_url + user_endpoint; - - // call oauth2 API with the accesstoken and create the profile - request.get( - user_profile_url, - { - headers: { - Authorization: "Bearer " + accessToken - } - }, - function(err, resp, body) { - if (err) { - cb(err); - return; - } - if (!/^2/.test("" + resp.statusCode)) { - cb(body); - return; - } - let parsedBody = JSON.parse(body); - let profile = { - user_id: parsedBody.staffId, - nickname: parsedBody.name, - name: parsedBody.name, - email: parsedBody.username + "+" + parsedBody.activeCaseLoadId + "@nomis", - username: parsedBody.username, - blocked: !parsedBody.active, - activeCaseLoadId: parsedBody.activeCaseLoadId, - _nomisAccessToken: accessToken - }; - cb(null, profile); - } - ); +function fetchUserProfile(accessToken, context, callback) { + // The email is only for auth0 usage purpose, not the actual email of login user + const profile = { + sub: context.sub, + user_id: context.user_id, + auth_source: context.auth_source, + nickname: context.name, + name: context.name, + username: context.user_name, + _accessToken: accessToken, + email: context.user_name + "+" + context.user_id + "@" + context.auth_source, + }; + callback(null, profile); } diff --git a/controlpanel/api/aws.py b/controlpanel/api/aws.py index 2b85e0188..8dedaa030 100644 --- a/controlpanel/api/aws.py +++ b/controlpanel/api/aws.py @@ -1,5 +1,6 @@ # Standard library import base64 +import hashlib import json import re from copy import deepcopy @@ -133,6 +134,7 @@ def iam_arn(resource, account=settings.AWS_DATA_ACCOUNT_ID): class S3AccessPolicy: """Provides a convenience wrapper around a RolePolicy object""" + SID_SUFFIX_LEN = 32 def __init__(self, policy): self.policy = policy @@ -156,6 +158,15 @@ def __init__(self, policy): if sid in self.base_s3_access_sids: stmt.update(deepcopy(BASE_S3_ACCESS_STATEMENT[sid])) self.statements[sid] = stmt + continue + + # check for the SID without md5 suffix + if sid[:-self.SID_SUFFIX_LEN] in self.base_s3_access_sids: + 
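+                # e.g. a per-bucket statement Sid such as
+                #     "listFolder" + md5("user-data-bucket")
+                # -> "listFolder0a1b2c..." (32 hex chars), so stripping the
+                # last SID_SUFFIX_LEN characters recovers the base Sid
+                # (the hash value above is illustrative only)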
stmt.update( + deepcopy(BASE_S3_ACCESS_STATEMENT[sid[:-self.SID_SUFFIX_LEN]]) + ) + stmt["Sid"] = sid + self.statements[sid] = stmt @property def base_s3_access_sids(self): @@ -165,16 +176,56 @@ def load_policy_document(self): # triggers API call return self.policy.policy_document - def statement(self, sid): - if sid in self.base_s3_access_sids: - if sid not in self.statements: - stmt = deepcopy(BASE_S3_ACCESS_STATEMENT[sid]) - self.statements[sid] = stmt - self.policy_document["Statement"].append(stmt) - return self.statements[sid] + def _build_statement(self, base_sid, sid): + """ + Build and store a new statement dictionary copied from the + BASE_S3_ACCESS_STATEMENT. + If a suffix is given this is appended to the Sid of the new statement block. + """ + statement = deepcopy(BASE_S3_ACCESS_STATEMENT[base_sid]) + statement["Sid"] = sid + self.statements[sid] = statement + self.policy_document["Statement"].append(statement) + return statement - def add_resource(self, arn, sid): - statement = self.statement(sid) + @staticmethod + def _construct_sid(base_sid, suffix=None): + """ + If a suffix is given this is used to create a md5 hash in order to meet AWS + requirements for the Sid, which supports only uppercase letters (A-Z), lowercase + letters (a-z), and numbers (0-9). + """ + if not suffix: + return base_sid + + suffix = hashlib.md5(suffix.encode()).hexdigest() + return f"{base_sid}{suffix}" + + def statement(self, base_sid, suffix=None): + """ + Get or build the statement element for the given Sid and suffix. + + :param str base_sid: should be one of the Sid defined that are defined in the + BASE_S3_ACCESS_STATEMENT + :param suffix: Optional arg, if given will be used when constructing the Sid + that is used to lookup or build a statement element. + :type suffix: str or None + """ + if base_sid not in self.base_s3_access_sids: + return + + sid = self._construct_sid(base_sid, suffix) + statement = self.statements.get(sid, None) + if not statement: + statement = self._build_statement( + base_sid=base_sid, + sid=sid + ) + + return statement + + def add_resource(self, arn, sid, sid_suffix=None): + statement = self.statement(sid, sid_suffix) if statement: statement["Resource"] = statement.get("Resource", []) if arn not in statement["Resource"]: @@ -197,6 +248,44 @@ def _is_arn_part_of_resource(self, resource, arn): return True return False + def remove_prefix(self, root_folder_path, sid, condition): + """ + Remove the folder name from the prefixes condition of a statement block. The + prefixes condition is used to limit list access to specific folders in an S3 + bucket, so by removing the folder name from the prefixes it removes access to + that folder. + + :param str root_folder_path: Path to the root folder including bucket name e.g. + user-data-bucket/my-folder + :param str sid: Statement ID + :param str condition: Condition operator. 
Should be StringEquals or StringLike + """ + # split the path into the bucket name and the folder name + bucket_name, folder = self.get_bucket_and_path(root_folder_path) + statement = self.statement(sid, suffix=bucket_name) + if not statement: + return + + # build arn for the bucket to check that it is included in the statements + # resource element + bucket_arn = s3_arn(resource=bucket_name) + if bucket_arn not in statement.get("Resource", []): + return + + try: + prefixes = statement["Condition"][condition]["s3:prefix"] + except KeyError: + prefixes = [] + + # remove access to the folder + prefixes[:] = [ + prefix for prefix in prefixes if not prefix.startswith(folder) + ] + + # remove the resource if no prefixes left so that the statement is removed + if prefixes == [] or prefixes == [""]: + statement.pop("Resource", None) + def remove_resource(self, arn, sid): statement = self.statement(sid) if statement: @@ -213,16 +302,29 @@ def grant_object_access(self, arn, access_level): def grant_list_access(self, arn): self.add_resource(arn, "list") - def _add_folder_to_list_folder_prefixes(self, folder): - statement = self.statement("listFolder") + def _add_folder_to_list_folder_prefixes(self, folder, bucket_name): + statement = self.statement("listFolder", suffix=bucket_name) + # make sure that we are updating statement for the correct bucket + if s3_arn(bucket_name) not in statement["Resource"]: + return + try: prefixes = statement["Condition"]["StringEquals"]["s3:prefix"] except KeyError: prefixes = [""] + subfolders = folder.split("/")[:-1] + for index, path in enumerate(subfolders): + prev = "/".join(subfolders[:index]) + if prev: + path = f"{prev}/{path}" + + if path not in prefixes: + prefixes.append(path) + prefixes.append(f"{path}/") + if folder not in prefixes: prefixes.append(folder) - prefixes.append(f"{folder}/") statement["Condition"] = { "StringEquals": { @@ -231,8 +333,13 @@ def _add_folder_to_list_folder_prefixes(self, folder): } } - def _add_folder_to_list_sub_folders_prefixes(self, folder): - statement = self.statement("listSubFolders") + def _add_folder_to_list_sub_folders_prefixes(self, folder, bucket_name): + statement = self.statement("listSubFolders", suffix=bucket_name) + + # make sure that we are updating statement for the correct bucket + if s3_arn(bucket_name) not in statement["Resource"]: + return + try: prefixes = statement["Condition"]["StringLike"]["s3:prefix"] except KeyError: @@ -248,28 +355,62 @@ def _add_folder_to_list_sub_folders_prefixes(self, folder): } } - def grant_folder_list_access(self, arn): + @staticmethod + def get_bucket_and_path(folder_path): """ - Splits the resource arn to get the bucket ARN and folder name, then for all - permissions required for folder list access makes sure the ARN is added as the - resource, and folder name is used in the statement condition prefixes so that - access if only granted to the specific folder and sub folders in the S3 bucket. - For a detailed breakdown of folder-level permissions see the docs: - https://aws.amazon.com/blogs/security/writing-iam-policies-grant-access-to-user-specific-folders-in-an-amazon-s3-bucket/ # noqa + Splits a path on the first / to return the name of the bucket and rest of + the path. 
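+        e.g. get_bucket_and_path("user-data-bucket/my-folder")
+        returns ["user-data-bucket", "my-folder"]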
""" - arn, folder = arn.split("/") - # required to avoid warnings when accessing AWS console - self.add_resource(arn, "rootFolderBucketMeta") - self.add_resource(arn, "listFolder") - self._add_folder_to_list_folder_prefixes(folder) - self.add_resource(arn, "listSubFolders") - self._add_folder_to_list_sub_folders_prefixes(folder) + return folder_path.split("/", 1) + + def grant_folder_access(self, root_folder_path, access_level, paths=None): + """ + Grant access to the given folder for the given access level. If paths have been + specified, only grants access to those paths within the root folder. + """ + bucket_name, folder = self.get_bucket_and_path(root_folder_path) + bucket_arn = s3_arn(bucket_name) + # make sure the root bucket arn is included in the resources + self.add_resource(bucket_arn, "rootFolderBucketMeta") + self.add_resource(bucket_arn, "listFolder", sid_suffix=bucket_name) + self.add_resource(bucket_arn, "listSubFolders", sid_suffix=bucket_name) + + folder_paths = [folder] + if paths: + folder_paths = [f"{folder}{path}" for path in paths] + + for folder_path in folder_paths: + self.grant_object_access( + arn=f"{bucket_arn}/{folder_path}", + access_level=access_level, + ) + self._add_folder_to_list_folder_prefixes(folder_path, bucket_name) + self._add_folder_to_list_sub_folders_prefixes(folder_path, bucket_name) def revoke_access(self, arn): self.remove_resource(arn, "readonly") self.remove_resource(arn, "readwrite") self.remove_resource(arn, "list") + def revoke_folder_access(self, root_folder_path): + # build arn for full path, including the folder name. important to include the + # folder name, as this is the resource string that is used when granting access + bucket_name, _ = self.get_bucket_and_path(root_folder_path) + folder_resource_arn = s3_arn(root_folder_path) + self.remove_resource(arn=folder_resource_arn, sid="readonly") + self.remove_resource(arn=folder_resource_arn, sid="readwrite") + self.remove_resource(arn=s3_arn(bucket_name), sid="rootFolderBucketMeta") + self.remove_prefix( + root_folder_path=root_folder_path, + sid="listFolder", + condition="StringEquals", + ) + self.remove_prefix( + root_folder_path=root_folder_path, + sid="listSubFolders", + condition="StringLike", + ) + def put(self, policy_document=None): if policy_document is None: policy_document = self.policy_document @@ -307,16 +448,19 @@ def save_policy_document(self, policy_document): class AWSService: - def __init__(self, assume_role_name=None, profile_name=None): + def __init__(self, assume_role_name=None, profile_name=None, region_name=None): self.assume_role_name = assume_role_name self.profile_name = profile_name + self.region_name = region_name or settings.AWS_DEFAULT_REGION self.aws_sessions = AWSCredentialSessionSet() @property def boto3_session(self): return self.aws_sessions.get_session( - assume_role_name=self.assume_role_name, profile_name=self.profile_name + assume_role_name=self.assume_role_name, + profile_name=self.profile_name, + region_name=self.region_name ) @@ -375,14 +519,19 @@ def grant_bucket_access(self, role_name, bucket_arn, access_level, path_arns=Non policy.grant_object_access(arn, access_level) policy.put() - def grant_folder_access(self, role_name, bucket_arn, access_level): + def grant_folder_access(self, role_name, root_folder_path, access_level, paths): + if access_level not in ("readonly", "readwrite"): raise ValueError("access_level must be one of 'readwrite' or 'readonly'") role = self.boto3_session.resource("iam").Role(role_name) policy = 
S3AccessPolicy(role.Policy("s3-access")) - policy.grant_folder_list_access(bucket_arn) - policy.grant_object_access(bucket_arn, access_level) + policy.revoke_folder_access(root_folder_path=root_folder_path) + policy.grant_folder_access( + root_folder_path=root_folder_path, + access_level=access_level, + paths=paths, + ) policy.put() def revoke_bucket_access(self, role_name, bucket_arn=None): @@ -403,6 +552,20 @@ def revoke_bucket_access(self, role_name, bucket_arn=None): policy.revoke_access(bucket_arn) policy.put() + def revoke_folder_access(self, role_name, root_folder_path): + try: + role = self.boto3_session.resource("iam").Role(role_name) + role.load() + except botocore.exceptions.ClientError as e: + if e.response["Error"]["Code"] == "NoSuchEntity": + log.warning(f"Role '{role_name}' doesn't exist: Nothing to revoke") + return + raise e + + policy = S3AccessPolicy(role.Policy("s3-access")) + policy.revoke_folder_access(root_folder_path=root_folder_path) + policy.put() + class AWSFolder(AWSService): @staticmethod @@ -427,6 +590,27 @@ def exists(self, folder_name): except botocore.exceptions.ClientError: return False + def get_objects(self, bucket_name, folder_name): + bucket = self.boto3_session.resource("s3").Bucket(bucket_name) + return bucket.objects.filter(Prefix=f"{folder_name}/") + + def archive_object(self, key, source_bucket_name=None, delete_original=True): + source_bucket_name = source_bucket_name or settings.S3_FOLDER_BUCKET_NAME + copy_source = { + 'Bucket': source_bucket_name, + 'Key': key + } + archive_bucket = self.boto3_session.resource("s3").Bucket( + settings.S3_ARCHIVE_BUCKET_NAME + ) + new_key = f"{archive_bucket.name}/{key}" + + archive_bucket.copy(copy_source, new_key) + log.info(f"Moved {key} to {new_key}") + if delete_original: + self.boto3_session.resource("s3").Object(source_bucket_name, key).delete() + log.info(f"deleted original: {source_bucket_name}/{key}") + class AWSBucket(AWSService): def create(self, bucket_name, is_data_warehouse=False): @@ -619,6 +803,23 @@ def delete_policy(self, policy_arn): policy.delete() + def grant_folder_access( + self, policy_arn, root_folder_path, access_level, paths=None + ): + + if access_level not in ("readonly", "readwrite"): + raise ValueError("access_level must be one of 'readwrite' or 'readonly'") + + policy = self.boto3_session.resource("iam").Policy(policy_arn) + policy = ManagedS3AccessPolicy(policy) + policy.revoke_folder_access(root_folder_path=root_folder_path) + policy.grant_folder_access( + root_folder_path=root_folder_path, + access_level=access_level, + paths=paths, + ) + policy.put() + def grant_policy_bucket_access( self, policy_arn, bucket_arn, access_level, path_arns=None ): @@ -646,6 +847,12 @@ def revoke_policy_bucket_access(self, policy_arn, bucket_arn=None): policy.revoke_access(bucket_arn) policy.put() + def revoke_policy_folder_access(self, policy_arn, root_folder_path): + policy = self.boto3_session.resource("iam").Policy(policy_arn) + policy = ManagedS3AccessPolicy(policy) + policy.revoke_folder_access(root_folder_path=root_folder_path) + policy.put() + class AWSParameterStore(AWSService): def __init__(self, assume_role_name=None, profile_name=None): @@ -808,3 +1015,112 @@ def get_secret_if_found(self, secret_name: str) -> Optional[dict]: if self.has_existed(secret_name): return self.get_secret(secret_name) return {} + + +class AWSSQS(AWSService): + + def __init__(self, assume_role_name=None, profile_name=None): + super(AWSSQS, self).__init__( + assume_role_name=assume_role_name, + 
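+            # pin the session to the queue's home region, since SQS queues
+            # are regional resources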
profile_name=profile_name, + region_name=settings.SQS_REGION + ) + self.client = self.boto3_session.resource("sqs") + + def get_queue(self, name): + """ + Gets an SQS queue by name. + + :param name: The name that was used to create the queue. + :return: A Queue object. + """ + try: + queue = self.client.get_queue_by_name(QueueName=name) + log.info("Got queue '%s' with URL=%s", name, queue.url) + except botocore.exceptions.ClientError as error: + log.exception("Couldn't get queue named %s.", name) + raise error + else: + return queue + + def send_message(self, queue_name, message_body, message_attributes=None): + """ + Send a message to an Amazon SQS queue. + + :param queue_name: The queue that receives the message. + :param message_body: The body text of the message. + :param message_attributes: Custom attributes of the message. These are key-value + pairs that can be whatever you want. + :return: The response from SQS that contains the assigned message ID. + """ + if not message_attributes: + message_attributes = {} + + queue = self.get_queue(name=queue_name) + try: + response = queue.send_message( + MessageBody=message_body, + MessageAttributes=message_attributes + ) + except botocore.exceptions.ClientError as error: + log.exception("Send message failed: %s", message_body) + raise error + else: + return response + + def receive_messages(self, queue_name, max_number, wait_time): + """ + Receive a batch of messages in a single request from an SQS queue. + + :param queue_name: The queue from which to receive messages. + :param max_number: The maximum number of messages to receive. The actual number + of messages received might be less. + :param wait_time: The maximum time to wait (in seconds) before returning. When + this number is greater than zero, long polling is used. This + can result in reduced costs and fewer false empty responses. + :return: The list of Message objects received. These each contain the body + of the message and metadata and custom attributes. + """ + queue = self.get_queue(name=queue_name) + try: + messages = queue.receive_messages( + MessageAttributeNames=['All'], + MaxNumberOfMessages=max_number, + WaitTimeSeconds=wait_time + ) + for msg in messages: + log.info("Received message: %s: %s", msg.message_id, msg.body) + except botocore.exceptions.ClientError as error: + log.exception("Couldn't receive messages from queue: %s", queue_name) + raise error + else: + return messages + + def delete_messages(self, queue, messages): + """ + Delete a batch of messages from a queue in a single request. + + :param queue: The queue from which to delete the messages. + :param messages: The list of messages to delete. + :return: The response from SQS that contains the list of successful and failed + message deletions. 
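+
+        Note: messages should be deleted before their visibility timeout
+        expires; after that the receipt handles become stale and SQS will
+        redeliver the messages.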
+ """ + try: + entries = [{ + 'Id': str(ind), + 'ReceiptHandle': msg.receipt_handle + } for ind, msg in enumerate(messages)] + response = queue.delete_messages(Entries=entries) + if 'Successful' in response: + for msg_meta in response['Successful']: + log.info("Deleted %s", messages[int(msg_meta['Id'])].receipt_handle) + if 'Failed' in response: + for msg_meta in response['Failed']: + log.warning( + "Could not delete %s", + messages[int(msg_meta['Id'])].receipt_handle + ) + except botocore.exceptions.ClientError: + log.exception("Couldn't delete messages from queue %s", queue) + else: + return response diff --git a/controlpanel/api/aws_auth.py b/controlpanel/api/aws_auth.py index 97b8b0d76..ee0881c18 100644 --- a/controlpanel/api/aws_auth.py +++ b/controlpanel/api/aws_auth.py @@ -113,8 +113,13 @@ class AWSCredentialSessionSet(metaclass=SingletonMeta): def __init__(self): self.credential_sessions = {} - def get_session(self, profile_name: str = None, assume_role_name: str = None): - credential_session_key = "{}_{}".format(profile_name, assume_role_name) + def get_session( + self, + profile_name: str = None, + assume_role_name: str = None, + region_name: str = None): + credential_session_key = "{}_{}_{}".format( + profile_name, assume_role_name, region_name) if credential_session_key not in self.credential_sessions: log.warn( "(for monitoring purpose) Initialising the session ({})".format( @@ -122,6 +127,8 @@ def get_session(self, profile_name: str = None, assume_role_name: str = None): ) ) self.credential_sessions[credential_session_key] = BotoSession( - profile_name=profile_name, assume_role_name=assume_role_name + region_name=region_name, + profile_name=profile_name, + assume_role_name=assume_role_name ).refreshable_session() return self.credential_sessions[credential_session_key] diff --git a/controlpanel/api/cluster.py b/controlpanel/api/cluster.py index e46160960..e13ba3c88 100644 --- a/controlpanel/api/cluster.py +++ b/controlpanel/api/cluster.py @@ -340,19 +340,26 @@ def delete(self): self.aws_role_service.delete_role(self.user.iam_role_name) self.delete_user_helm_charts() - def grant_bucket_access(self, bucket_arn, access_level, path_arns=[]): + def grant_bucket_access(self, bucket_arn, access_level, path_arns=None): + path_arns = path_arns or [] self.aws_role_service.grant_bucket_access( self.iam_role_name, bucket_arn, access_level, path_arns ) - def grant_folder_access(self, bucket_arn, access_level): + def grant_folder_access(self, root_folder_path, access_level, paths): self.aws_role_service.grant_folder_access( - self.iam_role_name, bucket_arn, access_level + role_name=self.iam_role_name, + root_folder_path=root_folder_path, + access_level=access_level, + paths=paths, ) def revoke_bucket_access(self, bucket_arn): self.aws_role_service.revoke_bucket_access(self.iam_role_name, bucket_arn) + def revoke_folder_access(self, root_folder_path): + self.aws_role_service.revoke_folder_access(self.iam_role_name, root_folder_path) + def has_required_installation_charts(self): """Checks if the expected helm charts exist for the user.""" installed_helm_charts = helm.list_releases(namespace=self.k8s_namespace) @@ -627,6 +634,8 @@ def create_auth_settings( ): client = None group = None + connections = connections or \ + {auth0.ExtendedAuth0.DEFAULT_CONNECTION_OPTION: {}} if not disable_authentication: client, group = self._get_auth0_instance().setup_auth0_client( client_name=self.app.auth0_client_name(env_name), @@ -640,7 +649,7 @@ def create_auth_settings( self._create_env_vars( env_name, 
disable_authentication, - connections or [], + connections, client=client, ) return client, group @@ -655,6 +664,20 @@ def remove_auth_settings(self, env_name): self._get_auth0_instance().clear_up_app(self.app.get_auth_client(env_name)) self.app.clear_auth_settings(env_name) + def update_auth_connections(self, env_name, new_conns): + existing_conns = self.app.auth0_connections(env_name=env_name) + self.create_or_update_env_var( + env_name=env_name, + key_name=self.AUTH0_PASSWORDLESS, + key_value=auth0.ExtendedAuth0.DEFAULT_CONNECTION_OPTION in new_conns + ) + auth0.ExtendedAuth0().update_client_auth_connections( + app_name=self.app.auth0_client_name(env_name), + client_id=self.app.get_auth_client(env_name).get("client_id"), + new_conns=new_conns, + existing_conns=existing_conns, + ) + def remove_redundant_env(self, env_name): self._get_auth0_instance().clear_up_app(self.app.get_auth_client(env_name)) self.app.clear_auth_settings(env_name) @@ -708,9 +731,21 @@ def _init_aws_services(self): self.aws_bucket_service = self.create_aws_service(self.aws_service_class) def exists(self, folder_name, bucket_owner): + # TODO this assumes only one multi root bucket folder_path = f"{settings.S3_FOLDER_BUCKET_NAME}/{folder_name}" return super().exists(folder_path, bucket_owner), folder_path + def get_objects(self): + bucket_name, folder_name = self.bucket.name.split("/") + return self.aws_bucket_service.get_objects( + bucket_name=bucket_name, folder_name=folder_name, + ) + + def archive_object(self, key, source_bucket=None, delete_original=True): + self.aws_bucket_service.archive_object( + key=key, source_bucket_name=source_bucket, delete_original=delete_original, + ) + class RoleGroup(EntityResource): """ @@ -758,9 +793,17 @@ def grant_bucket_access(self, bucket_arn, access_level, path_arns): self.arn, bucket_arn, access_level, path_arns ) + def grant_folder_access(self, root_folder_path, access_level, paths): + self.aws_policy_service.grant_folder_access( + self.arn, root_folder_path, access_level, paths + ) + def revoke_bucket_access(self, bucket_arn): self.aws_policy_service.revoke_policy_bucket_access(self.arn, bucket_arn) + def revoke_folder_access(self, root_folder_path): + self.aws_policy_service.revoke_policy_folder_access(self.arn, root_folder_path) + class AppParameter(EntityResource): diff --git a/controlpanel/api/message_broker.py b/controlpanel/api/message_broker.py new file mode 100644 index 000000000..bc682dbd0 --- /dev/null +++ b/controlpanel/api/message_broker.py @@ -0,0 +1,202 @@ +import os +import socket +import uuid +import json +import base64 +from collections.abc import Mapping + +from controlpanel.api.aws import AWSSQS +from controlpanel import celery_app + + +class MessageProtocolError(Exception): + pass + + +class MessageProtocol: + + def __init__(self, task_id: str, task_name: str, queue_name: str, args=None, kwargs=None): + self.task_id = task_id + self.task_name = task_name + self.queue_name = queue_name + self.args = args or () + self.kwargs = kwargs or {} + self. 
_validate()
+
+    def _validate(self):
+        if not isinstance(self.args, (list, tuple)):
+            raise TypeError('task args must be a list or tuple')
+        if not isinstance(self.kwargs, Mapping):
+            raise TypeError('task keyword arguments must be a mapping')
+        try:
+            uuid.UUID(str(self.task_id))
+        except ValueError:
+            raise TypeError('task id arguments must be a uuid')
+        if not self.task_name:
+            raise TypeError('task name arguments must not be blank')
+        if not self.queue_name:
+            raise TypeError('queue name arguments must not be blank')
+
+    def prepare_message(self):
+        raise NotImplementedError("Not implemented")
+
+
+class SimpleBase64:
+    """Base64 codec."""
+
+    @staticmethod
+    def str_to_bytes(s):
+        """Convert str to bytes."""
+        if isinstance(s, str):
+            return s.encode()
+        return s
+
+    @staticmethod
+    def bytes_to_str(s):
+        """Convert bytes to str."""
+        if isinstance(s, bytes):
+            return s.decode(errors='replace')
+        return s
+
+    def encode(self, s):
+        return self.bytes_to_str(base64.b64encode(self.str_to_bytes(s)))
+
+    def decode(self, s):
+        return base64.b64decode(self.str_to_bytes(s))
+
+
+class CeleryTaskMessage(MessageProtocol):
+
+    #: Default body encoding.
+    DEFAULT_BODY_ENCODING = 'base64'
+    DEFAULT_CONTENT_TYPE = "application/json"
+    DEFAULT_CONTENT_ENCODING = "utf-8"
+    DEFAULT_PRIORITY = 0
+
+    DEFAULT_FUNC_SIGNATURE = None
+
+    codecs = {'base64': SimpleBase64()}
+
+    @staticmethod
+    def _anon_nodename():
+        """Return the nodename for this process (not a worker).
+
+        This is used for e.g. the origin task message field.
+        """
+        return f"{os.getpid()}@{socket.gethostname()}"
+
+    def _init_message(self):
+        headers = {
+            'lang': 'py',
+            'task': self.task_name,
+            'id': self.task_id,
+            'group': None,
+            'root_id': self.task_id,
+            'parent_id': None,
+            'origin': self._anon_nodename()
+        }
+
+        message = dict(
+            headers=headers,
+            properties={
+                'correlation_id': self.task_id,
+            },
+            body=(
+                self.args, self.kwargs, {
+                    'callbacks': self.DEFAULT_FUNC_SIGNATURE,
+                    'errbacks': self.DEFAULT_FUNC_SIGNATURE,
+                    'chain': self.DEFAULT_FUNC_SIGNATURE,
+                    'chord': self.DEFAULT_FUNC_SIGNATURE,
+                },
+            )
+        )
+        return message
+
+    def prepare_message(self):
+        message = self._init_message()
+        properties = message.get("properties") or {}
+        info = properties.setdefault('delivery_info', {})
+        info['priority'] = self.DEFAULT_PRIORITY or 0
+        message['content-encoding'] = self.DEFAULT_CONTENT_ENCODING
+        message['content-type'] = self.DEFAULT_CONTENT_TYPE
+        message['body'], body_encoding = self.__class__.encode_body(
+            json.dumps(message['body']), self.DEFAULT_BODY_ENCODING
+        )
+        props = message['properties']
+        props.update(
+            body_encoding=body_encoding,
+            delivery_tag=str(uuid.uuid4()),
+        )
+        props['delivery_info'].update(
+            routing_key=self.queue_name,
+        )
+        encoded_message, _ = self.__class__.encode_body(
+            json.dumps(message), self.DEFAULT_BODY_ENCODING)
+        return encoded_message
+
+    @classmethod
+    def encode_body(cls, body, encoding=None):
+        if encoding:
+            return cls.codecs.get(encoding).encode(body), encoding
+        return body, encoding
+
+    @classmethod
+    def decode_body(cls, body, encoding=None):
+        if encoding:
+            return cls.codecs.get(encoding).decode(body)
+        return body
+
+    @classmethod
+    def validate_message(cls, message):
+        decoded_message = cls.decode_body(message, cls.DEFAULT_BODY_ENCODING)
+        try:
+            message_body = json.loads(decoded_message)
+            assert message_body["content-encoding"] == cls.DEFAULT_CONTENT_ENCODING
+            assert message_body["content-type"] == cls.DEFAULT_CONTENT_TYPE
+            assert message_body["headers"]["id"] == message_body["headers"]["root_id"]
+            assert message_body["headers"]["id"] == message_body["properties"]["correlation_id"]
+            assert type(json.loads(cls.decode_body(message_body["body"], cls.DEFAULT_BODY_ENCODING))) == list
+            return True, message_body
+        except (ValueError, KeyError, AssertionError):
+            return False, None
+
+
+class MessageBrokerClient:
+    DEFAULT_MESSAGE_PROTOCOL = "celery"
+
+    MESSAGE_PROTOCOL_MAP_TABLE = {
+        "celery": CeleryTaskMessage
+    }
+
+    def __init__(self, message_protocol=None):
+        self.message_protocol = message_protocol or self.DEFAULT_MESSAGE_PROTOCOL
+        self.client = self._get_client()
+
+    def _get_client(self):
+        return AWSSQS()
+
+    def send_message(self, task_id, task_name, queue_name, args):
+        message_class = self.MESSAGE_PROTOCOL_MAP_TABLE.get(self.message_protocol)
+        if not message_class:
+            raise MessageProtocolError(
+                f"Unsupported message protocol: {self.message_protocol}"
+            )
+
+        message = message_class(
+            task_id=task_id,
+            task_name=task_name,
+            queue_name=queue_name,
+            args=tuple(args)
+        ).prepare_message()
+        self._get_client().send_message(queue_name=queue_name, message_body=message)
+        return message
+
+
+class LocalMessageBrokerClient:
+    """
+    Uses celery to send tasks so that it can be used with a message broker running
+    locally such as Redis
+    """
+    @staticmethod
+    def send_message(task_id, task_name, queue_name, args):
+        return celery_app.send_task(
+            task_name, task_id=task_id, queue_name=queue_name, args=args,
+        )
diff --git a/controlpanel/api/migrations/0030_task.py b/controlpanel/api/migrations/0030_task.py
new file mode 100644
index 000000000..d231a8915
--- /dev/null
+++ b/controlpanel/api/migrations/0030_task.py
@@ -0,0 +1,44 @@
+# Generated by Django 4.2.1 on 2023-07-30 14:52
+
+from django.db import migrations, models
+import django_extensions.db.fields
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("api", "0029_remove_tool_target_infrastructure"),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name="Task",
+            fields=[
+                (
+                    "created",
+                    django_extensions.db.fields.CreationDateTimeField(
+                        auto_now_add=True, verbose_name="created"
+                    ),
+                ),
+                (
+                    "modified",
+                    django_extensions.db.fields.ModificationDateTimeField(
+                        auto_now=True, verbose_name="modified"
+                    ),
+                ),
+                ("entity_class", models.CharField(max_length=20)),
+                ("entity_description", models.CharField(max_length=128)),
+                ("entity_id", models.BigIntegerField()),
+                ("user_id", models.CharField(max_length=128)),
+                ("task_id", models.UUIDField(primary_key=True, serialize=False)),
+                ("task_name", models.CharField(max_length=60)),
+                ("task_description", models.CharField(max_length=128)),
+                ("queue_name", models.CharField(max_length=60)),
+                ("completed", models.BooleanField(default=False)),
+                ("message_body", models.CharField(max_length=4000)),
+            ],
+            options={
+                "db_table": "control_panel_api_task",
+                "ordering": ("entity_class", "entity_id"),
+            },
+        ),
+    ]
diff --git a/controlpanel/api/migrations/0031_add_soft_delete_fields.py b/controlpanel/api/migrations/0031_add_soft_delete_fields.py
new file mode 100644
index 000000000..c66261360
--- /dev/null
+++ b/controlpanel/api/migrations/0031_add_soft_delete_fields.py
@@ -0,0 +1,31 @@
+# Generated by Django 4.2.1 on 2023-10-09 15:33
+
+# Third-party
+import django.db.models.deletion
+from django.conf import settings
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('api', '0030_task'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='s3bucket',
+            name='deleted_at',
+            field=models.DateTimeField(null=True),
+        ),
migrations.AddField( + model_name='s3bucket', + name='deleted_by', + field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='deleted_s3buckets', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='s3bucket', + name='is_deleted', + field=models.BooleanField(default=False), + ), + ] diff --git a/controlpanel/api/models/__init__.py b/controlpanel/api/models/__init__.py index 139a18257..e9383894b 100644 --- a/controlpanel/api/models/__init__.py +++ b/controlpanel/api/models/__init__.py @@ -16,3 +16,4 @@ from controlpanel.api.models.userapp import UserApp from controlpanel.api.models.users3bucket import UserS3Bucket from controlpanel.api.models.app_ip_allowlist import AppIPAllowList +from controlpanel.api.models.task import Task diff --git a/controlpanel/api/models/access_to_s3bucket.py b/controlpanel/api/models/access_to_s3bucket.py index 38cc543cf..369f216fa 100644 --- a/controlpanel/api/models/access_to_s3bucket.py +++ b/controlpanel/api/models/access_to_s3bucket.py @@ -76,6 +76,15 @@ def resources(self): # MUST use signals because cascade deletes do not call delete() @receiver(models.signals.pre_delete) def revoke_access(sender, **kwargs): - if issubclass(sender, AccessToS3Bucket): - obj = kwargs["instance"] + """ + Revokes access when the delete is via cascade delete, as these do not call the + instance level delete() method. Checks that the origin is different to the instance, + to ensure that revoke_bucket_access is not called twice when deleting an access + object directly. + """ + if not issubclass(sender, AccessToS3Bucket): + return + + obj = kwargs["instance"] + if obj != kwargs["origin"]: obj.revoke_bucket_access() diff --git a/controlpanel/api/models/app.py b/controlpanel/api/models/app.py index 98036d78d..e94041b28 100644 --- a/controlpanel/api/models/app.py +++ b/controlpanel/api/models/app.py @@ -13,6 +13,7 @@ from controlpanel.api import auth0, cluster from controlpanel.api.models import IPAllowlist from controlpanel.utils import github_repository_name, s3_slugify, webapp_release_name +from controlpanel.api import tasks class App(TimeStampedModel): @@ -34,6 +35,13 @@ class App(TimeStampedModel): # are not within the fields which will be searched frequently app_conf = models.JSONField(null=True) + # Non database field just for passing extra parameters + disable_authentication = False + connections = {} + current_user = None + deployment_envs = [] + has_ip_ranges = False + DEFAULT_AUTH_CATEGORY = "primary" KEY_WORD_FOR_AUTH_SETTINGS = "auth_settings" @@ -43,6 +51,15 @@ class Meta: db_table = "control_panel_api_app" ordering = ("name",) + def __init__(self, *args, **kwargs): + """Overwrite this constructor to pass some non-field parameter""" + self.disable_authentication = kwargs.pop("disable_authentication", False) + self.connections = kwargs.pop("connections", {}) + self.current_user = kwargs.pop("current_user", None) + self.deployment_envs = kwargs.pop("deployment_envs", []) + self.has_ip_ranges = kwargs.pop("has_ip_ranges", False) + super().__init__(*args, **kwargs) + def __repr__(self): return f"" @@ -232,15 +249,6 @@ def auth0_client_name(self, env_name=None): return settings.AUTH0_CLIENT_NAME_PATTERN.format( app_name=client_name, env=env_name) - @property - def migration_info(self): - # TODO: using app.description for temporary place for storing old app info, - # The content of this field should be removed after app migration is completed. 
- try: - return json.loads(self.description).get("migration", {}) - except ValueError: - return {} - def app_url_name(self, env_name): format_pattern = settings.APP_URL_NAME_PATTERN.get(env_name.upper()) if not format_pattern: @@ -303,3 +311,27 @@ class DeleteCustomerError(Exception): App.AddCustomerError = AddCustomerError App.DeleteCustomerError = DeleteCustomerError + + +from django.db.models.signals import post_save, post_delete +from django.dispatch import receiver + + +@receiver(post_save, sender=App) +def trigger_app_create_related_messages(sender, instance, created, **kwargs): + if created: + tasks.AppCreateRole(instance, instance.current_user).create_task() + tasks.AppCreateAuth(instance, instance.current_user, extra_data=dict( + deployment_envs=instance.deployment_envs, + disable_authentication=instance.disable_authentication, + connections=instance.connections, + has_ip_ranges=instance.has_ip_ranges, + )).create_task() + + +@receiver(post_delete, sender=App) +def remove_app_related_tasks(sender, instance, **kwargs): + from controlpanel.api.models import Task + related_app_tasks = Task.objects.filter(entity_class="App", entity_id=instance.id) + for task in related_app_tasks: + task.delete() diff --git a/controlpanel/api/models/apps3bucket.py b/controlpanel/api/models/apps3bucket.py index 81740de15..9cd5d44d4 100644 --- a/controlpanel/api/models/apps3bucket.py +++ b/controlpanel/api/models/apps3bucket.py @@ -2,7 +2,7 @@ from django.db import models # First-party/Local -from controlpanel.api import cluster +from controlpanel.api import cluster, tasks from controlpanel.api.models.access_to_s3bucket import AccessToS3Bucket @@ -19,12 +19,20 @@ class AppS3Bucket(AccessToS3Bucket): on_delete=models.CASCADE, ) + # Non database field just for passing extra parameters + current_user = None + class Meta: db_table = "control_panel_api_apps3bucket" # one record per app/s3bucket unique_together = ("app", "s3bucket") ordering = ("id",) + def __init__(self, *args, **kwargs): + """Overwrite this constructor to pass some non-field parameter""" + self.current_user = kwargs.pop("current_user", None) + super().__init__(*args, **kwargs) + @property def iam_role_name(self): return self.app.iam_role_name @@ -33,11 +41,7 @@ def __repr__(self): return f"" def grant_bucket_access(self): - cluster.App(self.app).grant_bucket_access( - self.s3bucket.arn, - self.access_level, - self.resources, - ) + tasks.S3BucketGrantToApp(self, self.current_user).create_task() def revoke_bucket_access(self): - cluster.App(self.app).revoke_bucket_access(self.s3bucket.arn) + tasks.S3BucketRevokeAppAccess(self, self.current_user).create_task() diff --git a/controlpanel/api/models/policys3bucket.py b/controlpanel/api/models/policys3bucket.py index 4ab0d346e..feb7e4299 100644 --- a/controlpanel/api/models/policys3bucket.py +++ b/controlpanel/api/models/policys3bucket.py @@ -21,7 +21,17 @@ class Meta: unique_together = ("policy", "s3bucket") ordering = ("id",) + def __init__(self, *args, **kwargs): + self.current_user = kwargs.pop("current_user", None) + super().__init__(*args, **kwargs) + def grant_bucket_access(self): + if self.s3bucket.is_folder: + return cluster.RoleGroup(self.policy).grant_folder_access( + root_folder_path=self.s3bucket.name, + access_level=self.access_level, + paths=self.paths, + ) cluster.RoleGroup(self.policy).grant_bucket_access( self.s3bucket.arn, self.access_level, @@ -29,4 +39,10 @@ def grant_bucket_access(self): ) def revoke_bucket_access(self): + # TODO update to use a Task to revoke access, to match 
user/app access + if self.s3bucket.is_folder: + return cluster.RoleGroup(self.policy).revoke_folder_access( + root_folder_path=self.s3bucket.name + ) + cluster.RoleGroup(self.policy).revoke_bucket_access(self.s3bucket.arn) diff --git a/controlpanel/api/models/s3bucket.py b/controlpanel/api/models/s3bucket.py index b1dcb4776..2194babb8 100644 --- a/controlpanel/api/models/s3bucket.py +++ b/controlpanel/api/models/s3bucket.py @@ -3,14 +3,16 @@ # Third-party from django.conf import settings +from django.contrib.auth.models import User from django.core.validators import MinLengthValidator from django.db import models from django.db.models import Q from django.db.transaction import atomic +from django.utils import timezone from django_extensions.db.models import TimeStampedModel # First-party/Local -from controlpanel.api import cluster, validators +from controlpanel.api import cluster, tasks, validators from controlpanel.api.models.apps3bucket import AppS3Bucket from controlpanel.api.models.users3bucket import UserS3Bucket @@ -56,6 +58,14 @@ class S3Bucket(TimeStampedModel): is_data_warehouse = models.BooleanField(default=False) # TODO remove this field - it's unused location_url = models.CharField(max_length=128, null=True) + is_deleted = models.BooleanField(default=False) + deleted_by = models.ForeignKey( + "User", + on_delete=models.SET_NULL, + null=True, + related_name="deleted_s3buckets" + ) + deleted_at = models.DateTimeField(null=True) objects = S3BucketQuerySet.as_manager() @@ -128,21 +138,27 @@ def access_level(self, user): def save(self, *args, **kwargs): is_create = not self.pk - super().save(*args, **kwargs) - - if is_create: - bucket_owner = kwargs.pop("bucket_owner", self.bucket_owner) - self.cluster.create(bucket_owner) - - # XXX created_by is always set if model is saved by the API view - if self.created_by: - UserS3Bucket.objects.create( - user=self.created_by, - s3bucket=self, - is_admin=True, - access_level=UserS3Bucket.READWRITE, - ) + if not is_create: + return self + + tasks.S3BucketCreate( + entity=self, + user=self.created_by, + extra_data={ + "bucket_owner": kwargs.pop("bucket_owner", self.bucket_owner), + } + ).create_task() + + # created_by should always be set, but this is a failsafe + if self.created_by: + UserS3Bucket.objects.create( + user=self.created_by, + current_user=self.created_by, + s3bucket=self, + is_admin=True, + access_level=UserS3Bucket.READWRITE, + ) return self @@ -152,3 +168,19 @@ def delete(self, *args, **kwargs): if not self.is_folder: self.cluster.mark_for_archival() super().delete(*args, **kwargs) + + def soft_delete(self, deleted_by: User): + """ + Mark the object as deleted, but do not remove it from the database + """ + self.is_deleted = True + self.deleted_by = deleted_by + self.deleted_at = timezone.now() + self.save() + # TODO update to handle deleting folders + if self.is_folder: + tasks.S3BucketArchive(self, self.deleted_by).create_task() + else: + self.cluster.mark_for_archival() + + tasks.S3BucketRevokeAllAccess(self, self.deleted_by).create_task() diff --git a/controlpanel/api/models/task.py b/controlpanel/api/models/task.py new file mode 100644 index 000000000..815d48be1 --- /dev/null +++ b/controlpanel/api/models/task.py @@ -0,0 +1,50 @@ +import json + +# Third-party +from django.db import models +from django_extensions.db.models import TimeStampedModel + +from controlpanel.utils import send_sse + + +class Task(TimeStampedModel): + """ + Use the task table to track the basic status of task fired from the app + """ + + entity_class = 
models.CharField(max_length=20) + entity_description = models.CharField(max_length=128) + entity_id = models.BigIntegerField() + user_id = models.CharField(max_length=128) + task_id = models.UUIDField(primary_key=True) + task_name = models.CharField(max_length=60) + task_description = models.CharField(max_length=128) + queue_name = models.CharField(max_length=60) + completed = models.BooleanField(default=False) + message_body = models.CharField(max_length=4000) + + class Meta: + db_table = "control_panel_api_task" + ordering = ("entity_class", "entity_id") + + def __repr__(self): + return f"" + + def save(self, *args, **kwargs): + super().save(*args, **kwargs) + + if self.completed: + payload = { + "entity_name": self.entity_description, + "task_description": self.task_description, + "status": "COMPLETED", + } + send_sse( + self.user_id, + { + "event": "taskStatus", + "data": json.dumps(payload), + }, + ) + + diff --git a/controlpanel/api/models/users3bucket.py b/controlpanel/api/models/users3bucket.py index 1e54f2f28..7e5cfbec3 100644 --- a/controlpanel/api/models/users3bucket.py +++ b/controlpanel/api/models/users3bucket.py @@ -2,7 +2,7 @@ from django.db import models # First-party/Local -from controlpanel.api import cluster +from controlpanel.api import cluster, tasks from controlpanel.api.models.access_to_s3bucket import AccessToS3Bucket @@ -20,12 +20,20 @@ class UserS3Bucket(AccessToS3Bucket): ) is_admin = models.BooleanField(default=False) + # Non database field just for passing extra parameters + current_user = None + class Meta: db_table = "control_panel_api_users3bucket" # one record per user/s3bucket unique_together = ("user", "s3bucket") ordering = ("id",) + def __init__(self, *args, **kwargs): + """Overwrite this constructor to pass some non-field parameter""" + self.current_user = kwargs.pop("current_user", None) + super().__init__(*args, **kwargs) + @property def iam_role_name(self): return self.user.iam_role_name @@ -37,19 +45,10 @@ def __repr__(self): ) def grant_bucket_access(self): - if self.s3bucket.is_folder: - # TODO update to include paths/resources - # will be implemented in ANPL-1592 - return cluster.User(self.user).grant_folder_access( - bucket_arn=self.s3bucket.arn, - access_level=self.access_level, - ) - - cluster.User(self.user).grant_bucket_access( - self.s3bucket.arn, - self.access_level, - self.resources, - ) + tasks.S3BucketGrantToUser(self, self.current_user).create_task() def revoke_bucket_access(self): - cluster.User(self.user).revoke_bucket_access(self.s3bucket.arn) + # TODO when soft delete is added, this should be updated to use the user that + # has deleted the parent S3bucket to ensure we store the user that has sent the + # task in the case of cascading deletes + tasks.S3BucketRevokeUserAccess(self, self.current_user).create_task() diff --git a/controlpanel/api/rules.py b/controlpanel/api/rules.py index 5d48bb784..058a3f7bb 100644 --- a/controlpanel/api/rules.py +++ b/controlpanel/api/rules.py @@ -214,3 +214,5 @@ def is_owner(user, obj): add_perm("api.retrieve_parameter", is_authenticated & is_owner) add_perm("api.update_parameter", is_authenticated & is_owner) add_perm("api.destroy_parameter", is_authenticated & is_owner) + +add_perm("api.list_task", is_authenticated & is_superuser) diff --git a/controlpanel/api/serializers.py b/controlpanel/api/serializers.py index e33b21f14..7761379c4 100644 --- a/controlpanel/api/serializers.py +++ b/controlpanel/api/serializers.py @@ -209,12 +209,18 @@ class Meta: "created_by", "is_data_warehouse", 
"location_url", + "is_deleted", + "deleted_by", + "deleted_at", ) read_only_fields = ( "apps3buckets", "users3buckets", "created_by", "url", + "is_deleted", + "deleted_by", + "deleted_at", ) @@ -404,8 +410,10 @@ def _process_existing_env_settings(self, app_auth_settings, auth_settings_status env_data["variables"] = sorted(var_data.values(), key=lambda x: x["name"]) env_data["auth_required"] = auth_required - def _process_redundant_envs(self, app_auth_settings, auth_settings_status): + # NB. if earlier call to get app_auth_settings failed, this will have been + # passed into serializer as an empty dict. Which results in all env details + # being marked as redundant mistakenly redundant_envs = list(set(auth_settings_status.keys()) - set(app_auth_settings.keys())) for env_name in redundant_envs: diff --git a/controlpanel/api/tasks/__init__.py b/controlpanel/api/tasks/__init__.py new file mode 100644 index 000000000..5f1ccf51f --- /dev/null +++ b/controlpanel/api/tasks/__init__.py @@ -0,0 +1,13 @@ + +# First-party/Local +from controlpanel.api.tasks.app import AppCreateAuth, AppCreateRole +from controlpanel.api.tasks.s3bucket import ( + S3BucketArchive, + S3BucketArchiveObject, + S3BucketCreate, + S3BucketGrantToApp, + S3BucketGrantToUser, + S3BucketRevokeAllAccess, + S3BucketRevokeAppAccess, + S3BucketRevokeUserAccess, +) diff --git a/controlpanel/api/tasks/app.py b/controlpanel/api/tasks/app.py new file mode 100644 index 000000000..ba399419d --- /dev/null +++ b/controlpanel/api/tasks/app.py @@ -0,0 +1,41 @@ +# Third-party +from django.conf import settings + +# First-party/Local +from controlpanel.api.tasks.task_base import TaskBase + + +class AppCreateRole(TaskBase): + ENTITY_CLASS = "App" + QUEUE_NAME = settings.IAM_QUEUE_NAME + + @property + def task_name(self): + return "create_app_aws_role" + + @property + def task_description(self): + return "creating aws role" + + +class AppCreateAuth(AppCreateRole): + + QUEUE_NAME = settings.AUTH_QUEUE_NAME + + @property + def task_name(self): + return "create_app_auth_settings" + + def _get_args_list(self): + return [ + self.entity.id, + self.user.id if self.user else 'None', + self.extra_data.get('deployment_envs'), + self.extra_data.get('disable_authentication'), + self.extra_data.get('connections'), + # self.extra_data.get('has_ip_ranges') # NOT USED, REMOVE IT? 
+ ] + + @property + def task_description(self): + return "creating auth settings" diff --git a/controlpanel/api/tasks/handlers/__init__.py b/controlpanel/api/tasks/handlers/__init__.py new file mode 100644 index 000000000..3bc6b9fc0 --- /dev/null +++ b/controlpanel/api/tasks/handlers/__init__.py @@ -0,0 +1,24 @@ +# First-party/Local +from controlpanel import celery_app +from controlpanel.api.tasks.handlers.app import CreateAppAuthSettings, CreateAppAWSRole +from controlpanel.api.tasks.handlers.s3 import ( + ArchiveS3Bucket, + ArchiveS3Object, + CreateS3Bucket, + GrantAppS3BucketAccess, + GrantUserS3BucketAccess, + S3BucketRevokeAllAccess, + S3BucketRevokeAppAccess, + S3BucketRevokeUserAccess, +) + +create_app_aws_role = celery_app.register_task(CreateAppAWSRole()) +create_s3bucket = celery_app.register_task(CreateS3Bucket()) +grant_app_s3bucket_access = celery_app.register_task(GrantAppS3BucketAccess()) +grant_user_s3bucket_access = celery_app.register_task(GrantUserS3BucketAccess()) +create_app_auth_settings = celery_app.register_task(CreateAppAuthSettings()) +revoke_user_s3bucket_access = celery_app.register_task(S3BucketRevokeUserAccess()) +revoke_app_s3bucket_access = celery_app.register_task(S3BucketRevokeAppAccess()) +revoke_all_access_s3bucket = celery_app.register_task(S3BucketRevokeAllAccess()) +archive_s3bucket = celery_app.register_task(ArchiveS3Bucket) +archive_s3_object = celery_app.register_task(ArchiveS3Object) diff --git a/controlpanel/api/tasks/handlers/app.py b/controlpanel/api/tasks/handlers/app.py new file mode 100644 index 000000000..accc6d68b --- /dev/null +++ b/controlpanel/api/tasks/handlers/app.py @@ -0,0 +1,32 @@ +# First-party/Local +from controlpanel.api import cluster +from controlpanel.api.models import App, User +from controlpanel.api.tasks.handlers.base import BaseModelTaskHandler + + +class CreateAppAuthSettings(BaseModelTaskHandler): + model = App + name = "create_app_auth_settings" + + def handle(self, envs, disable_authentication, connections): + task_user = User.objects.filter(pk=self.task_user_pk).first() + if not task_user or not task_user.github_api_token: + # TODO maybe log this as something has gone wrong? + return self.complete() + + for env in envs: + cluster.App(self.object, task_user.github_api_token).create_auth_settings( + env_name=env, + disable_authentication=disable_authentication, + connections=connections, + ) + self.complete() + + +class CreateAppAWSRole(BaseModelTaskHandler): + model = App + name = "create_app_aws_role" + + def handle(self): + cluster.App(self.object).create_iam_role() + self.complete() diff --git a/controlpanel/api/tasks/handlers/base.py b/controlpanel/api/tasks/handlers/base.py new file mode 100644 index 000000000..ecccf684b --- /dev/null +++ b/controlpanel/api/tasks/handlers/base.py @@ -0,0 +1,67 @@ +# Third-party +from celery import Task as CeleryTask + +# First-party/Local +from controlpanel.api.models import Task + + +class BaseTaskHandler(CeleryTask): + # can be applied to project settings also + # these settings mean that messages are only removed from the queue (acknowledged) + # when returned. if an error occurs, they remain in the queue, and will be resent + # to the worker when the "visibility_timeout" has expired. "visibility_timeout" is + # a setting that is configured in SQS per queue. 
Currently set to 30secs + acks_late = True + acks_on_failure_or_timeout = False + task_obj = None + + def complete(self): + if self.task_obj: + self.task_obj.completed = True + self.task_obj.save() + + def get_task_obj(self): + return Task.objects.filter(task_id=self.request.id).first() + + def run(self, *args, **kwargs): + self.task_obj = self.get_task_obj() + if self.task_obj and self.task_obj.completed: + return + self.handle(*args, **kwargs) + + def handle(self, *args, **kwargs): + """ + Should contain the logic to run the task, and will be called after the run + method has been successfully called. + """ + raise NotImplementedError("Task logic not implemented") + + +class BaseModelTaskHandler(BaseTaskHandler): + name = None + model = None + object = None + task_user_pk = None + + def get_object(self, pk): + try: + return self.model.objects.get(pk=pk) + except self.model.DoesNotExist as exc: + # if the main object cannot be found, raise error and allow message to be + # added back to the queue as could be due to a race condition + raise exc + + def run(self, obj_pk, task_user_pk, *args, **kwargs): + """ + Default method that a celery Task object requires to be defined, and will be + called by the worker when a message is received by the queue. This will look up + the instance of the model, and store the PK of the user running the task to use + to look up the user later if required. The `handle` method is then called + with any other args and kwargs sent. + """ + self.task_obj = self.get_task_obj() + if self.task_obj and self.task_obj.completed: + return + self.object = self.get_object(obj_pk) + self.task_user_pk = task_user_pk + self.handle(*args, **kwargs) diff --git a/controlpanel/api/tasks/handlers/s3.py b/controlpanel/api/tasks/handlers/s3.py new file mode 100644 index 000000000..65773c320 --- /dev/null +++ b/controlpanel/api/tasks/handlers/s3.py @@ -0,0 +1,125 @@ +# Third-party +import structlog +from django.db.models.deletion import Collector + +# First-party/Local +from controlpanel.api import cluster, tasks +from controlpanel.api.models import App, AppS3Bucket, S3Bucket, User, UserS3Bucket +from controlpanel.api.models.access_to_s3bucket import AccessToS3Bucket +from controlpanel.api.tasks.handlers.base import BaseModelTaskHandler, BaseTaskHandler + +log = structlog.getLogger(__name__) + + +class CreateS3Bucket(BaseModelTaskHandler): + model = S3Bucket + name = "create_s3bucket" + + def handle(self, bucket_owner=None): + bucket_owner = bucket_owner or "USER" + self.object.cluster.create(owner=bucket_owner) + self.complete() + + +class GrantAppS3BucketAccess(BaseModelTaskHandler): + model = AppS3Bucket + name = 'grant_app_s3bucket_access' + + def handle(self): + cluster.App(self.object.app).grant_bucket_access( + self.object.s3bucket.arn, + self.object.access_level, + self.object.resources, + ) + self.complete() + + +class GrantUserS3BucketAccess(BaseModelTaskHandler): + model = UserS3Bucket + name = "grant_user_s3bucket_access" + + def handle(self): + if self.object.s3bucket.is_folder: + cluster.User(self.object.user).grant_folder_access( + root_folder_path=self.object.s3bucket.name, + access_level=self.object.access_level, + paths=self.object.paths, + ) + else: + cluster.User(self.object.user).grant_bucket_access( + bucket_arn=self.object.s3bucket.arn, + access_level=self.object.access_level, + path_arns=self.object.resources, + ) + self.complete() + + +class S3BucketRevokeUserAccess(BaseTaskHandler): + name = "revoke_user_s3bucket_access" + + def handle(self, 
bucket_identifier, bucket_user_pk, is_folder): + bucket_user = User.objects.get(pk=bucket_user_pk) + if is_folder: + cluster.User(bucket_user).revoke_folder_access(bucket_identifier) + else: + cluster.User(bucket_user).revoke_bucket_access(bucket_identifier) + self.complete() + + +class S3BucketRevokeAppAccess(BaseTaskHandler): + name = "revoke_app_s3bucket_access" + + def handle(self, bucket_arn, app_pk): + try: + app = App.objects.get(pk=app_pk) + except App.DoesNotExist: + # if the app doesn't exist there is nothing to revoke, so mark completed + # and return, otherwise `app` would be unbound below + return self.complete() + cluster.App(app).revoke_bucket_access(bucket_arn) + self.complete() + + +class S3BucketRevokeAllAccess(BaseModelTaskHandler): + model = S3Bucket + name = "s3bucket_revoke_all_access" + + def handle(self, *args, **kwargs): + """ + When an S3Bucket is soft-deleted, the related objects that handle access will + remain in place. In order to keep IAM roles updated, this task collects objects + that would have been deleted by a cascade, and revokes access to the deleted bucket + """ + task_user = User.objects.filter(pk=self.task_user_pk).first() + collector = Collector(using="default") + collector.collect([self.object]) + for model, instance in collector.instances_with_model(): + if not issubclass(model, AccessToS3Bucket): + continue + + instance.current_user = task_user + instance.revoke_bucket_access() + + self.complete() + + +class ArchiveS3Bucket(BaseModelTaskHandler): + model = S3Bucket + name = "archive_s3bucket" + + def handle(self, *args, **kwargs): + task_user = User.objects.filter(pk=self.task_user_pk).first() + for s3obj in cluster.S3Folder(self.object).get_objects(): + tasks.S3BucketArchiveObject( + self.object, task_user, extra_data={"s3obj_key": s3obj.key} + ).create_task() + self.complete() + + +class ArchiveS3Object(BaseModelTaskHandler): + model = S3Bucket + name = "archive_s3_object" + + def handle(self, s3obj_key): + # TODO update to use self.object.cluster to work with buckets + cluster.S3Folder(self.object).archive_object(key=s3obj_key) + self.complete() diff --git a/controlpanel/api/tasks/s3bucket.py b/controlpanel/api/tasks/s3bucket.py new file mode 100644 index 000000000..cccbff8d8 --- /dev/null +++ b/controlpanel/api/tasks/s3bucket.py @@ -0,0 +1,131 @@ +# Third-party +from django.conf import settings + +# First-party/Local +from controlpanel.api.tasks.task_base import TaskBase + + +class S3BucketCreate(TaskBase): + ENTITY_CLASS = "S3Bucket" + QUEUE_NAME = settings.S3_QUEUE_NAME + + @property + def task_name(self): + return "create_s3bucket" + + @property + def task_description(self): + return "creating s3 bucket" + + def _get_args_list(self): + return [ + self.entity.id, + self.user.id if self.user else 'None', + self.extra_data.get('bucket_owner'), + ] + + +class S3BucketRevokeAllAccess(TaskBase): + ENTITY_CLASS = "S3Bucket" + QUEUE_NAME = settings.S3_QUEUE_NAME + + @property + def task_name(self): + return "s3bucket_revoke_all_access" + + @property + def task_description(self): + return "Revokes all access to an S3 bucket" + + +class S3AccessMixin: + ACTION = None + ROLE = None + QUEUE_NAME = settings.IAM_QUEUE_NAME + + @property + def task_name(self): + return "{action}_{role}_s3bucket_access".format( + action=self.ACTION.lower(), + role=self.ROLE.lower() + ) + + @property + def task_description(self): + return "{action} access to the {role}".format( + action=self.ACTION.lower(), + role=self.ROLE.lower() + ) + + @property + def entity_description(self): + return self.entity.s3bucket.name + + +class
S3BucketGrantToUser(S3AccessMixin, TaskBase): + ENTITY_CLASS = "UserS3Bucket" + ACTION = "GRANT" + ROLE = "USER" + + +class S3BucketGrantToApp(S3AccessMixin, TaskBase): + ENTITY_CLASS = "AppS3Bucket" + ACTION = "GRANT" + ROLE = "APP" + + +class S3BucketRevokeUserAccess(S3AccessMixin, TaskBase): + ENTITY_CLASS = "UserS3Bucket" + ACTION = "REVOKE" + ROLE = "USER" + + def _get_args_list(self): + bucket = self.entity.s3bucket + return [ + bucket.name if bucket.is_folder else bucket.arn, + self.entity.user.pk, + bucket.is_folder, + ] + + +class S3BucketRevokeAppAccess(S3AccessMixin, TaskBase): + ENTITY_CLASS = "AppS3Bucket" + ACTION = "REVOKE" + ROLE = "APP" + + def _get_args_list(self): + return [ + self.entity.s3bucket.arn, + self.entity.app.pk, + ] + + +class S3BucketArchive(TaskBase): + ENTITY_CLASS = "S3Bucket" + QUEUE_NAME = settings.S3_QUEUE_NAME + + @property + def task_name(self): + return "archive_s3bucket" + + @property + def task_description(self): + return "move contents of s3 datasource to the archive bucket" + + +class S3BucketArchiveObject(TaskBase): + ENTITY_CLASS = "S3Bucket" + QUEUE_NAME = settings.S3_QUEUE_NAME + + @property + def task_name(self): + return "archive_s3_object" + + @property + def task_description(self): + return "move object to archive bucket" + + def _get_args_list(self): + args = super()._get_args_list() + args.append(self.extra_data.get('s3obj_key')) + return args diff --git a/controlpanel/api/tasks/task_base.py b/controlpanel/api/tasks/task_base.py new file mode 100644 index 000000000..8b124ed54 --- /dev/null +++ b/controlpanel/api/tasks/task_base.py @@ -0,0 +1,91 @@ +# Standard library +import uuid + +# Third-party +from django.conf import settings + +# First-party/Local +from controlpanel.api.message_broker import ( + LocalMessageBrokerClient, + MessageBrokerClient, +) +from controlpanel.api.models.task import Task + + +class TaskError(Exception): + pass + + +class TaskBase: + + QUEUE_NAME = settings.DEFAULT_QUEUE + ENTITY_CLASS = None + + def __init__(self, entity, user=None, extra_data=None): + self._message_broker_client = None + self.entity = entity + self.user = user + self.extra_data = extra_data + self._validate() + + def _validate(self): + if not self.entity: + raise TaskError("Please provide entity instance") + if self.user and self.user.__class__.__name__ != "User": + raise TaskError("The instance has to be user class") + if self.entity.__class__.__name__ != self.ENTITY_CLASS: + raise TaskError(f"The instance has to be {self.ENTITY_CLASS} class") + + @property + def task_id(self): + return str(uuid.uuid4()) + + @property + def task_description(self): + raise NotImplementedError("Not implemented") + + @property + def entity_description(self): + return self.entity.name + + @property + def task_name(self): + raise NotImplementedError("Not implemented") + + @property + def message_broker_client(self): + if self._message_broker_client is None: + self._message_broker_client = self._get_message_broker_client() + return self._message_broker_client + + @staticmethod + def _get_message_broker_client(): + if settings.USE_LOCAL_MESSAGE_BROKER: + return LocalMessageBrokerClient() + return MessageBrokerClient() + + def _get_args_list(self): + args = [self.entity.id] + if self.user: + args.append(self.user.id) + return args + + def create_task(self): + task_id = self.task_id + message = self.message_broker_client.send_message( + task_id=task_id, + task_name=self.task_name, + queue_name=self.QUEUE_NAME, + args=self._get_args_list() + ) + Task.objects.create( 
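+            # (editor's note) persisting message_body on the Task row is what lets
+            # TaskAPIView._send_message re-send it later. A hedged sketch of a
+            # typical call site for these task classes (variable names are
+            # illustrative):
+            #   tasks.S3BucketCreate(
+            #       entity=bucket,
+            #       user=request.user,
+            #       extra_data={"bucket_owner": "USER"},
+            #   ).create_task()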
+ entity_class=self.ENTITY_CLASS, + entity_description=self.entity_description, + entity_id=self.entity.id, + user_id=self.user.auth0_id if self.user else 'None', + task_id=task_id, + task_description=self.task_description, + task_name=self.task_name, + queue_name=self.QUEUE_NAME, + message_body=message ) diff --git a/controlpanel/develop/__init__.py b/controlpanel/api/tasks/user.py similarity index 100% rename from controlpanel/develop/__init__.py rename to controlpanel/api/tasks/user.py diff --git a/controlpanel/api/urls.py b/controlpanel/api/urls.py index 89b551751..d8031516d 100644 --- a/controlpanel/api/urls.py +++ b/controlpanel/api/urls.py @@ -36,4 +36,9 @@ views.ToolDeploymentAPIView.as_view(), name="tool-deployments", ), + path( + "tasks/<str:task_id>/<str:action>/", + views.TaskAPIView.as_view(), + name="tasks", + ), ] diff --git a/controlpanel/api/views/__init__.py b/controlpanel/api/views/__init__.py index bffde87fb..cd76283df 100644 --- a/controlpanel/api/views/__init__.py +++ b/controlpanel/api/views/__init__.py @@ -16,3 +16,4 @@ from controlpanel.api.views.tools import ToolViewSet from controlpanel.api.views.apps import AppByNameViewSet from controlpanel.api.views.repos import RepoApi, RepoEnvironmentAPI +from controlpanel.api.views.tasks import TaskAPIView diff --git a/controlpanel/api/views/models.py b/controlpanel/api/views/models.py index 9a6ab04f0..9a0e44f5a 100644 --- a/controlpanel/api/views/models.py +++ b/controlpanel/api/views/models.py @@ -20,10 +20,12 @@ class UserViewSet(viewsets.ModelViewSet): + resource = "user" + queryset = User.objects.all() serializer_class = serializers.UserSerializer filter_backends = (DjangoFilterBackend,) - permission_classes = (permissions.UserPermissions,) + permission_classes = (permissions.UserPermissions | permissions.JWTTokenResourcePermissions,) class AppViewSet(viewsets.ModelViewSet): diff --git a/controlpanel/api/views/tasks.py b/controlpanel/api/views/tasks.py new file mode 100644 index 000000000..69cb75162 --- /dev/null +++ b/controlpanel/api/views/tasks.py @@ -0,0 +1,33 @@ +# Third-party +from rest_framework import status +from rest_framework.generics import GenericAPIView +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response + +from controlpanel.api.models import Task +from controlpanel.api.message_broker import MessageBrokerClient + + +class TaskAPIView(GenericAPIView): + + http_method_names = ["post"] + permission_classes = (IsAuthenticated,) + + def _send_message(self, task_id): + task = Task.objects.filter(task_id=task_id).first() + if task: + message_client = MessageBrokerClient() + message_client.client.send_message( + queue_name=task.queue_name, + message_body=task.message_body + ) + + def post(self, request, *args, **kwargs): + task_id = self.kwargs["task_id"] + task_action = self.kwargs["action"] + task_action_function = getattr(self, f"_{task_action}", None) + if task_action_function and callable(task_action_function): + task_action_function(task_id) + return Response(status=status.HTTP_200_OK) + else: + return Response(status=status.HTTP_400_BAD_REQUEST) diff --git a/controlpanel/api/views/tool_deployments.py b/controlpanel/api/views/tool_deployments.py index 4444f71a8..6d81e1981 100644 --- a/controlpanel/api/views/tool_deployments.py +++ b/controlpanel/api/views/tool_deployments.py @@ -6,7 +6,7 @@ # First-party/Local from controlpanel.api import serializers -from controlpanel.frontend.consumers import start_background_task +from controlpanel.utils import start_background_task class
ToolDeploymentAPIView(GenericAPIView): diff --git a/controlpanel/celery.py b/controlpanel/celery.py new file mode 100644 index 000000000..5aa3f345e --- /dev/null +++ b/controlpanel/celery.py @@ -0,0 +1,53 @@ +# Standard library +import os +from pathlib import Path + +# Third-party +import dotenv +import structlog +from celery import Celery +from django.conf import settings +from kombu import Queue + +# First-party/Local +from controlpanel.utils import load_app_conf_from_file + +dotenv.load_dotenv() + + +# Set the default Django settings module for the 'celery' program. +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'controlpanel.settings') +load_app_conf_from_file() + +app = Celery('controlpanel') + +# Using a string here means the worker doesn't have to serialize +# the configuration object to child processes. +# - namespace='CELERY' means all celery-related configuration keys +# should have a `CELERY_` prefix. +app.config_from_object('django.conf:settings') +# Load task modules from all registered Django apps. +app.autodiscover_tasks() + +log = structlog.getLogger(__name__) + + +@app.task(bind=True, ignore_result=True) +def debug_task(self): + print(f'Request: {self.request!r}') + + +@app.task(bind=True, ignore_result=True) +def worker_health_check(self): + Path(settings.WORKER_HEALTH_FILENAME).touch() + log.debug("Worker health ping task executed") + + +# ensures worker picks and runs tasks from all queues rather than just default queue +# alternative is to run the worker and pass queue name to -Q flag +app.conf.task_queues = [ + Queue(settings.IAM_QUEUE_NAME, routing_key=settings.IAM_QUEUE_NAME), + Queue(settings.AUTH_QUEUE_NAME, routing_key=settings.AUTH_QUEUE_NAME), + Queue(settings.S3_QUEUE_NAME, routing_key=settings.S3_QUEUE_NAME), +] +app.conf.task_default_exchange = "tasks" diff --git a/controlpanel/cli/management/commands/get_customer_emails_csv.py b/controlpanel/cli/management/commands/get_customer_emails_csv.py new file mode 100644 index 000000000..01137c804 --- /dev/null +++ b/controlpanel/cli/management/commands/get_customer_emails_csv.py @@ -0,0 +1,31 @@ +# Standard library +import csv +from datetime import datetime + +# Third-party +from django.core.management import BaseCommand + +# First-party/Local +from controlpanel.api import auth0 + + +class Command(BaseCommand): + help = "Writes a CSV with all customer emails for an auth0 group" + + def add_arguments(self, parser): + parser.add_argument( + "group_name", + type=str, + help="input: The auth0 group name to get customers emails for" + ) + + def handle(self, *args, **options): + group_name = options["group_name"] + auth_instance = auth0.ExtendedAuth0() + group_id = auth_instance.groups.get_group_id(group_name) + timestamp = datetime.now().strftime("%d-%m-%Y_%H%M") + with open(f"{group_name}_customers_{timestamp}.csv", "w") as f: + writer = csv.writer(f) + writer.writerow(["Email"]) + for customer in auth_instance.groups.get_group_members(group_id): + writer.writerow([customer["email"]]) diff --git a/controlpanel/cli/management/commands/migrating_customers.py b/controlpanel/cli/management/commands/migrating_customers.py index 9d978b8d0..ed92e993a 100644 --- a/controlpanel/cli/management/commands/migrating_customers.py +++ b/controlpanel/cli/management/commands/migrating_customers.py @@ -10,7 +10,10 @@ class Command(BaseCommand): - help = "Copy the customers of an application from old auth client to new auth clients" + help = """Copy the customers of an application from old auth client to new auth + clients. 
This is idempotent, meaning that rerunning for the apps will not + cause problems: all copied customers will remain, and any un-copied customers will be + migrated.""" DEFAULT_ENVS = ["dev", "prod"] diff --git a/controlpanel/cli/management/commands/post_migration_clearup.py b/controlpanel/cli/management/commands/post_migration_clearup.py new file mode 100644 index 000000000..8315a72c6 --- /dev/null +++ b/controlpanel/cli/management/commands/post_migration_clearup.py @@ -0,0 +1,91 @@ + +# Third-party +from django.core.management.base import BaseCommand

# First-party/Local +from controlpanel.api.models import App, AppS3Bucket +from controlpanel.api import auth0 + + +class Command(BaseCommand): + help = "Clear up the redundant resources for migrated apps " \ "- remove the old auth0 related resources " \ "- remove old auth0 information from control db" \ "- remove the apps which are not required any more" + + SCRIPT_LOG_FILE_NAME = "./clear_up_auth0_resources_log.txt" + + EXCEPTION_APPS = ["gold-scorecard-form"] + + def add_arguments(self, parser): + parser.add_argument( + "-a", "--apply", action="store_true", help="Apply the actions" + ) + + def _remove_old_auth0_clients(self, app, auth0_instance): + old_client_info = (app.app_conf or {}).get(App.KEY_WORD_FOR_AUTH_SETTINGS, {}).\ get(App.DEFAULT_AUTH_CATEGORY, {}) + if not old_client_info: + self._log_info(f"No old client for {app.slug} - {app.repo_url}") + return + + self._log_info(f"Removing the old client for {app.slug} - {app.repo_url}") + if self.apply_action: + auth0_instance.clear_up_app(old_client_info) + + def _update_db(self, app): + self._log_info(f"Removing the migration info and old clients for {app.slug} - {app.repo_url}") + app.description = "" + if App.DEFAULT_AUTH_CATEGORY in (app.app_conf or {}).get(App.KEY_WORD_FOR_AUTH_SETTINGS, {}): + del app.app_conf[App.KEY_WORD_FOR_AUTH_SETTINGS][App.DEFAULT_AUTH_CATEGORY] + if self.apply_action: + app.save() + + def _remove_application(self, app): + self._log_info(f"Removing the application {app.slug} - {app.repo_url}") + + """ TODO: how to deal with related bucket?
we will output + the related datasets from this script""" + # log the related buckets information into file + related_buckets = AppS3Bucket.objects.filter(app_id=app.id) + for item in related_buckets: + self._log_info(f"The app links the bucket - {item.s3bucket.name}") + # Remove the relationship to avoid removal of the bucket when removing the app + if self.apply_action: + item.delete() + if self.apply_action: + app.delete() + + def _log_info(self, info): + self.stdout.write(info) + with open(self.SCRIPT_LOG_FILE_NAME, "a") as f: + f.write(info) + f.write("\n") + + def _clear_up_resources(self, auth0_instance): + apps = App.objects.all() + counter = 1 + for app in apps: + if app.slug in self.EXCEPTION_APPS: + self._log_info(f"Ignore the application {app.slug}") + continue + + try: + self._log_info(f"{counter}--Processing the application {app.slug}") + + self._remove_old_auth0_clients(app, auth0_instance) + self._update_db(app) + if "moj-analytical-services" in app.repo_url: + self._remove_application(app) + self._log_info(f"{counter}--Done with the application {app.slug}") + counter += 1 + except Exception as ex: + self._log_info(f"Failed to process {app.slug} due to error : {ex.__str__()}") + + def handle(self, *args, **options): + self.stdout.write("start to scan the apps from database.") + auth0_instance = auth0.ExtendedAuth0() + self.apply_action = options.get('apply') or False + self._clear_up_resources(auth0_instance) + self.stdout.write("Clean up action has completed.") + diff --git a/controlpanel/develop/apps.py b/controlpanel/develop/apps.py deleted file mode 100644 index 0aa76d51a..000000000 --- a/controlpanel/develop/apps.py +++ /dev/null @@ -1,6 +0,0 @@ -# Third-party -from django.apps import AppConfig - - -class DevelopConfig(AppConfig): - name = "controlpanel.develop" diff --git a/controlpanel/develop/templates/develop/index.html b/controlpanel/develop/templates/develop/index.html deleted file mode 100644 index f4ff53dec..000000000 --- a/controlpanel/develop/templates/develop/index.html +++ /dev/null @@ -1,56 +0,0 @@ - - - - - - -
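An editorial aside on the clear-up command above: without flags it is effectively a dry run that only logs what it would do, and it mutates Auth0 and the database only when --apply is passed. A minimal sketch of driving it from Python, assuming only what the diff shows (call_command is standard Django):

    from django.core.management import call_command

    call_command("post_migration_clearup")             # dry run: log actions only
    call_command("post_migration_clearup", "--apply")  # actually remove clients/apps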

Development integration page

-

User: {{ username }}

- -

Status: {{ status | default:"Pending" }}

- -
- {% csrf_token %} - Tool installation - Jupyter Lab
- RStudio
- Airflow
- - -
- - -

Currently installed tools

-
    - {% for tool in installed_tools %} -
  • {{ tool }}
  • - {% endfor %} -
- - - - diff --git a/controlpanel/develop/urls.py b/controlpanel/develop/urls.py deleted file mode 100644 index 73f35aad8..000000000 --- a/controlpanel/develop/urls.py +++ /dev/null @@ -1,8 +0,0 @@ -# Third-party -from django.urls import path - -from .views import develop_index - -urlpatterns = [ - path("", develop_index, name="develop_index"), -] diff --git a/controlpanel/develop/views.py b/controlpanel/develop/views.py deleted file mode 100644 index 235537c04..000000000 --- a/controlpanel/develop/views.py +++ /dev/null @@ -1,45 +0,0 @@ -# Standard library -from typing import List - -# Third-party -from django.contrib.auth.decorators import login_required -from django.shortcuts import render - - -def installed_tools(username: str) -> List[str]: - # TODO: Get a list of this user's installed tools and return - # a list of string ["like", "this"] - - return [] - - -def user_selected_tool(username: str, toolname: str) -> str: - # TODO: Create/Ensure instance of the named tool - - return f"Install {toolname} for {username}" - - -@login_required() -def develop_index(request): - status = None - tool = None - - if request.method == "POST": - data = request.POST - - tool = data.get("tool", "") - if not tool: - status = "No tool selected" - else: - status = user_selected_tool(request.user, tool) - - return render( - request, - "develop/index.html", - { - "username": request.user, - "status": status, - "tool": tool, - "installed_tools": installed_tools(request.user), - }, - ) diff --git a/controlpanel/frontend/consumers.py b/controlpanel/frontend/consumers.py index a3f3895e7..4c5431ce1 100644 --- a/controlpanel/frontend/consumers.py +++ b/controlpanel/frontend/consumers.py @@ -9,10 +9,9 @@ # Third-party import structlog -from asgiref.sync import async_to_sync from channels.consumer import SyncConsumer -from channels.layers import get_channel_layer from django.db import transaction +from django.conf import settings # First-party/Local from controlpanel.api import cluster @@ -31,12 +30,8 @@ ToolDeployment, User, ) -from controlpanel.utils import PatchedAsyncHttpConsumer, sanitize_dns_label +from controlpanel.utils import (PatchedAsyncHttpConsumer, sanitize_dns_label, send_sse) -WORKER_HEALTH_FILENAME = "/tmp/worker_health.txt" - - -channel_layer = get_channel_layer() log = structlog.getLogger(__name__) @@ -234,21 +229,11 @@ def home_reset(self, message): log.debug(f"Reset home directory for user {user}") def workers_health(self, message): - Path(WORKER_HEALTH_FILENAME).touch() + Path(settings.WORKER_HEALTH_FILENAME).touch() log.debug("Worker health ping task executed") -def send_sse(user_id, event): - """ - Tell the SSEConsumer to send an event to the specified user - """ - async_to_sync(channel_layer.group_send)( - sanitize_dns_label(user_id), - {"type": "sse.event", **event}, - ) - - def update_tool_status(tool_deployment, id_token, status): user = tool_deployment.user tool = tool_deployment.tool @@ -282,16 +267,6 @@ def update_home_status(home_directory, status): ) -def start_background_task(task, message): - async_to_sync(channel_layer.send)( - "background_tasks", - { - "type": task, - **message, - }, - ) - - def wait_for_deployment(tool_deployment, id_token): status = TOOL_DEPLOYING while status == TOOL_DEPLOYING: diff --git a/controlpanel/frontend/forms.py b/controlpanel/frontend/forms.py index 9c841df54..4fe857c1c 100644 --- a/controlpanel/frontend/forms.py +++ b/controlpanel/frontend/forms.py @@ -146,7 +146,7 @@ class CreateAppForm(AppAuth0Form): required=False, ) existing_datasource_id = 
DatasourceChoiceField( - queryset=S3Bucket.objects.filter(is_data_warehouse=False), + queryset=S3Bucket.objects.filter(is_data_warehouse=False, is_deleted=False), empty_label="Select", required=False, ) @@ -284,7 +284,8 @@ class GrantAccessForm(forms.Form): label="Paths (optional)", help_text=( "Add specific paths for this user or group to access or leave blank " - "for whole bucket access" + "for full access. Paths must be separated by a newline, with a " + "leading forward slash and no trailing slash. For example: /my-path" ), required=False, delimiter="\n", @@ -308,12 +309,28 @@ def clean(self): cleaned_data["is_admin"] = True if cleaned_data["entity_type"] == "user": - cleaned_data["user_id"] = cleaned_data["entity_id"] + cleaned_data["user_id"] = cleaned_data.get("entity_id") elif cleaned_data["entity_type"] == "group": - cleaned_data["policy_id"] = cleaned_data["entity_id"] + cleaned_data["policy_id"] = cleaned_data.get("entity_id") return cleaned_data + def clean_paths(self): + """ + Validation to ensure paths are entered with a leading forward slash, and without + trailing slash. This is to ensure that the correct IAM permissions are added + at the aws.S3AccessPolicy level. + """ + paths = self.cleaned_data["paths"] + for path in paths: + if not path.startswith("/"): + raise ValidationError("Enter paths prefixed with a forward slash") + + if path.endswith("/"): + raise ValidationError("Enter paths without a trailing forward slash") + + return paths + class GrantAppAccessForm(forms.Form): access_level = forms.ChoiceField( @@ -336,9 +353,11 @@ def __init__(self, *args, **kwargs): if self.exclude_connected: self.fields["datasource"].queryset = S3Bucket.objects.exclude( id__in=[a.s3bucket_id for a in self.app.apps3buckets.all()], - ) + ).filter(is_deleted=False) else: - self.fields["datasource"].queryset = S3Bucket.objects.all() + self.fields["datasource"].queryset = S3Bucket.objects.filter( + is_deleted=False, + ) class CreateIAMManagedPolicyForm(forms.Form): diff --git a/controlpanel/frontend/jinja2/customers-list.html b/controlpanel/frontend/jinja2/customers-list.html index a31fed92d..9c3f042a4 100644 --- a/controlpanel/frontend/jinja2/customers-list.html +++ b/controlpanel/frontend/jinja2/customers-list.html @@ -18,7 +18,11 @@
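For clarity, here are the path rules the new clean_paths method enforces, distilled into a standalone sketch (the ValidationError messages mirror the form code above; the helper name is editorial):

    from django.core.exceptions import ValidationError

    def validate_paths(paths):
        """Leading forward slash required, trailing slash forbidden."""
        for path in paths:
            if not path.startswith("/"):
                raise ValidationError("Enter paths prefixed with a forward slash")
            if path.endswith("/"):
                raise ValidationError("Enter paths without a trailing forward slash")
        return paths

    validate_paths(["/my-path", "/my-path/sub-folder"])  # passes
    # validate_paths(["my-path/"])  # raises: no leading slash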

Customer management for {{ app.name }}

-{% if groups_dict %} +{% if not groups_dict %} +

+ This app does not require authentication, so there is no need to manage its customers in the Control Panel

+{% else %}

Switch to different deployment environment from the following links: @@ -48,13 +52,6 @@

App customers under {{ groups_dict.get(group_id) }} environment {{ modal_dialog(app_customers_html|safe) }}

-{% else %} -

- No need to manage the customers of app on Control panel -

-{% endif %} - -
{{ csrf_input }} @@ -112,41 +109,44 @@

) }} -{% if request.user.has_perm('api.add_app_customer', app) %} - - {{ csrf_input }} -
- - {% if errors and errors.customer_email %} - {% for error in errors.customer_email %} - - Error: {{ error }} - - {% endfor %} - {% endif %} - -
-
- -
- -{% endif %} + {% if request.user.has_perm('api.add_app_customer', app) %} +
+ {{ csrf_input }} +
+ + {% if errors and errors.customer_email %} + {% for error in errors.customer_email %} + + Error: {{ error }} + + {% endfor %} + {% endif %} + +
+
+ +
+
+ {% endif %} + + {% if remove_perm %} +
+ {{ csrf_input }} +
+ + {{ remove_customer_form.email }} +
+
+ +
+
+ {% endif %} -{% if remove_perm %} -
- {{ csrf_input }} -
- - {{ remove_customer_form.email }} -
-
- -
-
{% endif %} + {% endblock %} diff --git a/controlpanel/frontend/jinja2/datasource-access-grant.html b/controlpanel/frontend/jinja2/datasource-access-grant.html index 9aadd3f2d..00ab26269 100644 --- a/controlpanel/frontend/jinja2/datasource-access-grant.html +++ b/controlpanel/frontend/jinja2/datasource-access-grant.html @@ -14,7 +14,7 @@

{{ page_title }}

{{ csrf_input }} -
+
-
+
- {% if form.errors %} + {% if form.access_level.errors %} Error: {% for error in form.errors.access_level %} diff --git a/controlpanel/frontend/jinja2/datasource-detail.html b/controlpanel/frontend/jinja2/datasource-detail.html index 82932d011..82106174a 100644 --- a/controlpanel/frontend/jinja2/datasource-detail.html +++ b/controlpanel/frontend/jinja2/datasource-detail.html @@ -20,13 +20,25 @@

{{ page_title }}

- - Open on AWS - + {% if bucket.is_deleted %} +

+ + + Warning + This bucket was deleted by + {{ user_name(bucket.deleted_by) }} on {{ bucket.deleted_at.strftime("%Y/%m/%d %H:%M:%S") }}.
+ All access listed below has been revoked in IAM. +
+
+ {% else %} + + Open on AWS + + {% endif %}

-
-

Users and groups with access

+
+

Users and groups with{% if bucket.is_deleted %} revoked{% endif %} access

@@ -63,7 +75,7 @@

Users and groups with access

{%- endif %} {% endfor %} + + {% set plural = access_list|length > 1 %}
- {% if request.user.has_perm('api.update_users3bucket', member) %} + {% if request.user.has_perm('api.update_users3bucket', member) and not bucket.is_deleted %} Edit access level @@ -73,17 +85,19 @@

Users and groups with access

{{ access_list|length }} - user{%- if access_list|length != 1 -%}s{% endif %} or group{%- if access_list|length != 1 -%}s have{% else %} has{% endif %} + user{%- if plural -%}s{% endif %} or group{%- if plural -%}s{% endif %}{% if bucket.is_deleted %} had{% elif plural %} have{% else %} has{% endif %} access to this {{ datasource_type }} data source
- {% if request.user.has_perm('api.create_users3bucket', bucket) and users_options|length %} + {% if request.user.has_perm('api.create_users3bucket', bucket) and users_options|length and not bucket.is_deleted %} Grant user access @@ -91,7 +105,7 @@

Users and groups with access

{% endif %} {% if request.user.has_perm('api.manage_groups') %} - {% if request.user.has_perm('api.create_policys3bucket', bucket) and policies_options|length %} + {% if request.user.has_perm('api.create_policys3bucket', bucket) and policies_options|length and not bucket.is_deleted %}
Grant group access @@ -212,7 +226,7 @@

Data access log

{% endif %} -{% if request.user.has_perm('api.destroy_s3bucket', bucket) %} +{% if not bucket.is_deleted and request.user.has_perm('api.destroy_s3bucket', bucket) %}
{{ csrf_input }} diff --git a/controlpanel/frontend/jinja2/datasource-list.html b/controlpanel/frontend/jinja2/datasource-list.html index 8e2027d88..7c575bc6a 100644 --- a/controlpanel/frontend/jinja2/datasource-list.html +++ b/controlpanel/frontend/jinja2/datasource-list.html @@ -4,12 +4,12 @@ {% extends "base.html" %} -{% if datasource_type %} - {% set page_name = datasource_type + "-datasource-list" %} - {% set page_title = "Your " + datasource_type + " data sources" %} -{% else %} +{% if all_datasources %} {% set page_name = "all-datasources" %} {% set page_title = "All data sources" %} +{% else %} + {% set page_name = datasource_type + "-datasource-list" %} + {% set page_title = "Your " + datasource_type + " data sources" %} {% endif %} {% set access_levels_html %} @@ -71,4 +71,9 @@

Other {{ datasource_type }} data sources

{% endif %} +{% if request.user.is_superuser and deleted_datasources %} +

Deleted data sources

+ {{ datasource_list(deleted_datasources, datasource_type|default(""), request.user) }} +{% endif %} + {% endblock %} diff --git a/controlpanel/frontend/jinja2/home.html b/controlpanel/frontend/jinja2/home.html index 2099de5a3..33107e2df 100644 --- a/controlpanel/frontend/jinja2/home.html +++ b/controlpanel/frontend/jinja2/home.html @@ -15,6 +15,7 @@

Superuser functions

  • List all data sources
  • List all tool releases
  • List all IP allowlists
  • +
  • List all tasks
  • {% endif %} diff --git a/controlpanel/frontend/jinja2/includes/app-list.html b/controlpanel/frontend/jinja2/includes/app-list.html index 0954212d6..f5338ec20 100644 --- a/controlpanel/frontend/jinja2/includes/app-list.html +++ b/controlpanel/frontend/jinja2/includes/app-list.html @@ -7,7 +7,7 @@ {% macro app_list(apps, user) %} {%- set num_apps = apps|length %} - +
    diff --git a/controlpanel/frontend/jinja2/includes/datasource-access-form.html b/controlpanel/frontend/jinja2/includes/datasource-access-form.html index d1531b4ad..38b6e40e5 100644 --- a/controlpanel/frontend/jinja2/includes/datasource-access-form.html +++ b/controlpanel/frontend/jinja2/includes/datasource-access-form.html @@ -1,7 +1,7 @@ {% from "includes/list-field.html" import list_field_textarea %} {% macro data_access_paths_textarea(field) -%} -
    +
    {{ list_field_textarea(field.name, field.label, field.help_text, field.value() or "", field.errors) }}
    {%- endmacro %} diff --git a/controlpanel/frontend/jinja2/includes/list-field.html b/controlpanel/frontend/jinja2/includes/list-field.html index cceec944e..0fae11dde 100644 --- a/controlpanel/frontend/jinja2/includes/list-field.html +++ b/controlpanel/frontend/jinja2/includes/list-field.html @@ -1,5 +1,5 @@ {% from "fieldset/macro.html" import govukFieldset %} - +{% from "error-message/macro.html" import govukErrorMessage %} {% macro list_field_textarea(name, label, help_text="", value="", errors={}) -%} {% call govukFieldset({ @@ -9,6 +9,9 @@ }, "help_text": help_text }) %} + {% if errors %} + {{ govukErrorMessage({"text": errors|join(". ")}) }} + {% endif %} {%- endcall %} {%- endmacro %} diff --git a/controlpanel/frontend/jinja2/includes/task-list.html b/controlpanel/frontend/jinja2/includes/task-list.html new file mode 100644 index 000000000..bd2c6b298 --- /dev/null +++ b/controlpanel/frontend/jinja2/includes/task-list.html @@ -0,0 +1,49 @@ +{% macro task_list(tasks, csrf_input) %} +{%- set num_tasks = tasks|length %} +
    App name
    + + + + + + + + + + + + + {%- for task in tasks %} + + + + + + + + + + {% endfor %} + + + + + + +
    Entity classEntity IDEntity descriptionTask IDTask descriptionCreate time + Action +
    {{ task.entity_class }}{{ task.entity_id }}{{ task.entity_description }}{{ task.task_id }}{{ task.task_description }}{{ task.created }} + + {{ csrf_input }} + + +
    + {{ num_tasks }} task{% if num_tasks != 1 %}s{% endif %} +
    +{% endmacro %} diff --git a/controlpanel/frontend/jinja2/policy-create.html b/controlpanel/frontend/jinja2/policy-create.html index 29b412092..74f7da365 100644 --- a/controlpanel/frontend/jinja2/policy-create.html +++ b/controlpanel/frontend/jinja2/policy-create.html @@ -8,6 +8,7 @@ {%- endset %} {% set page_name = "policies" %} +{% set page_title = "Create a group for use with s3 permissions" %} {% block content %}

    {{ legend }}

    diff --git a/controlpanel/frontend/jinja2/task-list.html b/controlpanel/frontend/jinja2/task-list.html new file mode 100644 index 000000000..debbfda1e --- /dev/null +++ b/controlpanel/frontend/jinja2/task-list.html @@ -0,0 +1,13 @@ +{% from "user/macro.html" import user_name %} +{% from "includes/task-list.html" import task_list %} + +{% extends "base.html" %} + +{% set page_title = "Incomplete Tasks" %} + +{% block content %} +

    {{ page_title }}

    + +{{ task_list(tasks, csrf_input) }} + +{% endblock %} diff --git a/controlpanel/frontend/jinja2/webapp-detail.html b/controlpanel/frontend/jinja2/webapp-detail.html index 7b7d0c3af..99155982d 100644 --- a/controlpanel/frontend/jinja2/webapp-detail.html +++ b/controlpanel/frontend/jinja2/webapp-detail.html @@ -12,8 +12,7 @@ {% set page_name = "webapps" %} {% set page_title = app.name %} {% set app_domain = settings.APP_DOMAIN %} -{% set app_old_url = "https://" + app.slug + "/" + settings.APP_DOMAIN_BEFORE_MIGRATION %} -{% set feature_enabled = settings.features.app_migration.enabled %} +{% set app_old_url = "https://" + app.slug + "." + settings.APP_DOMAIN_BEFORE_MIGRATION %} {% set app_admins_html %} {% include "modals/app_admins.html" %} @@ -26,54 +25,17 @@ {% block content %}
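An editorial aside tying the template to the API added earlier in this diff: the Retry button in each task row posts back to TaskAPIView, whose dispatch resolves "_<action>" methods on the view, so the only supported action at this point is send_message. A hedged sketch of the round trip (the URL shape is reconstructed from the view's kwargs):

    # minimal sketch using Django's test client; `superuser` and `task` are
    # assumed to exist already
    from django.test import Client

    client = Client()
    client.force_login(superuser)
    # dispatches to TaskAPIView._send_message, which re-sends task.message_body
    # to task.queue_name via the message broker client
    client.post(f"/tasks/{task.task_id}/send_message/")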
    - {% if feature_enabled %} -
    -
    - - - Warning - More information and user guidance for the webapp deployment pipeline and related settings will be provided soon. If you have questions about the app migration process, please contact us on the #data-platform-application-migration-support Slack channel. - -
    -
    -
    -
    - {% endif %} Webapp

    {{ page_title }}

    - {% if feature_enabled %} -

    Migration status:

    - {% if app_migration_status == "in_progress" %} -

    This app is currently being migrated. It has been deployed onto Cloud Platform for testing, and the original version is still available on the Alpha cluster.

    - -

    - The above information about old app will be removed once the migration process is completed. -

    - {% elif app_migration_status == "done" %} -

    - The migration for this app has been completed -

    - {% else %} -

    - The migration process for this app has not been started yet! -

    - {% endif %} - {% else %} - {% if app.description %} -

    - {{ app.description }} -

    - {% endif %} + {% if app.description %} +

    + {{ app.description }} +

    {% endif %} - - {% if feature_enabled and repo_access_error_msg %} + {% if repo_access_error_msg %}
    @@ -92,14 +54,6 @@

    {% endif %} - {% if not app_migration_info and app_old_url %} -

    Deployed URL

    -

    If the app was successfully built on the alpha (original) cluster, it can be accessed here:

    -

    - {{ app_old_url }} -

    - {% endif %} -

    Source Code Repository

    {% if app.repo_url %} @@ -109,53 +63,49 @@

    Source Code Repository

    {% endif %}

    - {% if feature_enabled %} - -

    App logs

    -

    - {{ settings.KIBANA_BASE_URL }} -

    +

    App logs

    +

    + {{ settings.KIBANA_BASE_URL }} +

    -

    App resources usage dashboard

    -

    - {{ settings.GRAFANA_BASE_URL }} -

    +

    App resources usage dashboard

    +

    + {{ settings.GRAFANA_BASE_URL }} +

    -

    Deployment Pipeline

    -

    Github workflows on app's repo are used for deploying the app.

    -

    You are required to be member of admin team for this app repo in order to be able to maintain the deployments settings

    - {% endif %} +

    Deployment Pipeline

    +

GitHub workflows in the app's repository are used to deploy the app.

    +

You must be a member of the admin team for this app's repository in order to maintain its deployment settings

    -
    - {% if feature_enabled %} - {% if github_settings_access_error_msg %} - - {% endif %} +
    - {% for env_name, deployment_setting in deployments_settings.items() %} -

    Deployment settings under {{ env_name }}

    - {% if deployment_setting.get('is_redundant') and request.user.has_perm('api.update_app', app) %} -
    -

    It appears this deployment environment is redundant and can be removed

    -
    - {{ csrf_input }} - -
    -
    - {% else %} - {{ app_deployment_settings(app, env_name, app_domain, deployment_setting, request, csrf_input) }} - {% endif %} - {% endfor %} + {% if github_settings_access_error_msg %} + {% endif %} + {% for env_name, deployment_setting in deployments_settings.items() %} +

    Deployment settings under {{ env_name }}

    + {% if deployment_setting.get('is_redundant') and request.user.has_perm('api.update_app', app) %} +
    +

    It appears this deployment environment is redundant and can be removed

    +
    + {{ csrf_input }} + +
    +
    + {% else %} + {{ app_deployment_settings(app, env_name, app_domain, deployment_setting, request, csrf_input) }} + {% endif %} + {% endfor %} +
    diff --git a/controlpanel/frontend/jinja2/webapp-update-ip-allowlists.html b/controlpanel/frontend/jinja2/webapp-update-ip-allowlists.html index d3ecc5626..d772af857 100644 --- a/controlpanel/frontend/jinja2/webapp-update-ip-allowlists.html +++ b/controlpanel/frontend/jinja2/webapp-update-ip-allowlists.html @@ -14,8 +14,6 @@ {% endif %}
    - {% if app_migration_feature_enabled %} -
    {{ csrf_input }} @@ -44,12 +42,6 @@ contact the Analytical Platform team via our support Slack channel.

    - {% else %} - -

    IP allowlists for {{app.name}}

    -

    This feature is not yet available for general use.

    - - {% endif %}
    diff --git a/controlpanel/frontend/management/commands/celery_worker_health.py b/controlpanel/frontend/management/commands/celery_worker_health.py new file mode 100644 index 000000000..be1ed15a7 --- /dev/null +++ b/controlpanel/frontend/management/commands/celery_worker_health.py @@ -0,0 +1,57 @@ +# Standard library +import random +from datetime import datetime, timedelta +from pathlib import Path +from sys import exit + +# Third-party +from django.conf import settings +from django.core.management.base import BaseCommand + +# First-party/Local +from controlpanel.celery import worker_health_check + + +class Command(BaseCommand): + help = "Checks if this worker is still running tasks" + + def add_arguments(self, parser): + parser.add_argument( + "--stale-after-secs", + type=int, + default=120, + help="For how many seconds the last executed task is considered recent enough for the health check to pass", # noqa: E501 + ) + + def handle(self, *args, **options): + stale_after_secs = options["stale_after_secs"] + # send task to randomly chosen queue + worker_health_check.apply_async( + queue=random.choice(settings.PRE_DEFINED_QUEUES) + ) + # Attempt to read worker health ping file + # NOTE: This may initially fail depending on timing of health task + # execution but that's fine as Kubernetes' `failureThreashold` + # will be more than 1 anyway + try: + last_run_at_epoch = Path(settings.WORKER_HEALTH_FILENAME).stat().st_mtime + last_run_at = datetime.utcfromtimestamp(last_run_at_epoch) + except FileNotFoundError: + # Health ping file not found. Health task hasn't run on this worker yet + self.stderr.write(self.style.ERROR("Health ping file not found")) + exit(-1) + + self.stdout.write(f"Last run on this worker at: {last_run_at}") + + # check if this worker's health ping file is fresh/recent enough + if last_run_at < datetime.utcnow() - timedelta(seconds=stale_after_secs): + self.stderr.write(self.style.ERROR("Health ping file was stale")) + exit(-1) + + # Health ping file is fresh, success + self.stdout.write( + self.style.SUCCESS( + "Health ping file is fresh. This worker ran health task recently." + ) + ) + exit(0) diff --git a/controlpanel/frontend/management/commands/worker_health.py b/controlpanel/frontend/management/commands/worker_health.py index e40bee561..796dfae4b 100644 --- a/controlpanel/frontend/management/commands/worker_health.py +++ b/controlpanel/frontend/management/commands/worker_health.py @@ -7,9 +7,7 @@ from asgiref.sync import async_to_sync from channels.layers import get_channel_layer from django.core.management.base import BaseCommand - -# First-party/Local -from controlpanel.frontend.consumers import WORKER_HEALTH_FILENAME +from django.conf import settings class Command(BaseCommand): @@ -38,7 +36,7 @@ def handle(self, *args, **options): # execution but that's fine as Kubernetes' `failureThreashold` # will be more than 1 anyway try: - last_run_at_epoch = Path(WORKER_HEALTH_FILENAME).stat().st_mtime + last_run_at_epoch = Path(settings.WORKER_HEALTH_FILENAME).stat().st_mtime last_run_at = datetime.utcfromtimestamp(last_run_at_epoch) except FileNotFoundError: # Health ping file not found. 
Health task hasn't run on this worker yet diff --git a/controlpanel/frontend/static/javascripts/modules/confirm.js b/controlpanel/frontend/static/javascripts/modules/confirm.js index d9566f77b..f091d587a 100644 --- a/controlpanel/frontend/static/javascripts/modules/confirm.js +++ b/controlpanel/frontend/static/javascripts/modules/confirm.js @@ -18,7 +18,7 @@ moj.Modules.jsConfirm = { // works on any children of a `` with `confirmClass` but it's // usually used on `` or `
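A closing editorial sketch: worker_health.py and the new celery_worker_health.py share the same freshness check, which boils down to the helper below (the function name is editorial; the logic mirrors both commands):

    from datetime import datetime, timedelta
    from pathlib import Path

    def health_file_is_fresh(path, stale_after_secs=120):
        """True if the worker's health ping file was touched recently enough."""
        try:
            last_run_at = datetime.utcfromtimestamp(Path(path).stat().st_mtime)
        except FileNotFoundError:
            return False  # health task has not run on this worker yet
        return last_run_at >= datetime.utcnow() - timedelta(seconds=stale_after_secs)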