From 270b0750c4fda8ecee3cb92c7f3beee87d03dc2d Mon Sep 17 00:00:00 2001 From: George Taylor Date: Tue, 7 May 2024 23:20:12 +0100 Subject: [PATCH 1/7] =?UTF-8?q?=F0=9F=8E=89=20Initial=20workflow=20for=20s?= =?UTF-8?q?napshot=20creation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/create-pv-restore-point.yml | 114 ++++++++++++++++++ .github/workflows/create-rds-snapshot.yml | 60 +++++++++ jobs/backup-pvc/Chart.yaml | 5 + jobs/backup-pvc/templates/job.yaml | 78 ++++++++++++ 4 files changed, 257 insertions(+) create mode 100644 .github/workflows/create-pv-restore-point.yml create mode 100644 .github/workflows/create-rds-snapshot.yml create mode 100644 jobs/backup-pvc/Chart.yaml create mode 100644 jobs/backup-pvc/templates/job.yaml diff --git a/.github/workflows/create-pv-restore-point.yml b/.github/workflows/create-pv-restore-point.yml new file mode 100644 index 0000000..18a3032 --- /dev/null +++ b/.github/workflows/create-pv-restore-point.yml @@ -0,0 +1,114 @@ +name: Alfresco Restore Docs Worker Process + +on: + workflow_dispatch: + inputs: + which_env: + description: Environment where this restore docs process will run + required: true + type: choice + options: + - poc + s3_object_key: + description: S3 Object which needs to be restored + required: true + job_tier: + description: Glacier job tier (Expedited, Standard, Bulk) + required: false + default: 'Expedited' + +permissions: + contents: read + +jobs: + restore-docs-worker: + runs-on: ubuntu-latest + + steps: + - name: Checkout current repo + uses: actions/checkout@v3 + + - name: Configure kubectl + run: | + echo "${{ secrets.KUBE_CERT }}" > ca.crt + kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER} + kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }} + kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE} + kubectl config use-context ${KUBE_CLUSTER} + env: + KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }} + KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }} + + - name: Restore from Glacier by executing in the service pod + run: | + + #!/bin/bash + set -xe + + local SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}") + local SERVICE_POD_NAME=$(kubectl get pod -l app=$SERVICE_POD_DEPLOYMENT -o jsonpath="{.items[0].metadata.name}") + + local S3_BUCKET_NAME=$(kubectl get secrets s3-bucket-output -o jsonpath='{.data.BUCKET_NAME}' | base64 -d) + + # Exec into the service pod and execute the script + kubectl exec $SERVICE_POD_NAME -- /bin/sh -c ' + # Delete the delete marker versions + local version_id=$(aws s3api list-object-versions --bucket "$S3_BUCKET_NAME" --prefix "${S3_OBJECT_KEY}" --query "Versions[?IsLatest==\`true\`].[VersionId]" --output text | jq -r ".[0]") + aws s3api delete-object --bucket "$S3_BUCKET_NAME" --key "${S3_OBJECT_KEY}" --version-id "$version_id" + echo "Deleted marker version: $version_id" + + # Restore from Glacier + aws s3api restore-object --bucket "$S3_BUCKET_NAME" --key "${S3_OBJECT_KEY}" --restore-request "{\"Days\":1,\"GlacierJobParameters\":{\"Tier\":\"'$JOB_TIER'\"}}" + + # Wait for restoration to complete + local wait_interval=20 + local restore_status=$(aws s3api head-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" | jq -r '.Restore') + if [[ "$restore_status" == *"ongoing-request=\"true\""* ]]; then + #restore in progress + echo "Restore 
for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} in progress. Please wait!" + sleep "$wait_interval" + fi + + # Copy object within S3 bucket to update storage class + aws s3 cp "s3://$S3_BUCKET_NAME/${S3_OBJECT_KEY}" "s3://$S3_BUCKET_NAME/${S3_OBJECT_KEY}" --storage-class STANDARD + + echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} task complete." + ' - "$JOB_TIER" "${{ github.event.inputs.s3_object_key }}" + env: + JOB_TIER: ${{ github.event.inputs.job_tier }} + S3_OBJECT_KEY: ${{ github.event.inputs.s3_object_key }} + + # restore-docs-worker: + # name: Restore Docs from Glacier + # runs-on: ubuntu-22.04 + # environment: + # name: ${{ github.event.inputs.which_env }} + # steps: + # - name: Check out code + # uses: actions/checkout@v4.1.1 + + # - name: Configure kubectl + # run: | + # echo "${{ secrets.KUBE_CERT }}" > ca.crt + # kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER} + # kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }} + # kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE} + # kubectl config use-context ${KUBE_CLUSTER} + # env: + # KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }} + # KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }} + + # - name: Create ConfigMap using the restore-docs-worker.sh script + # run: | + # kubectl create configmap restore-docs-worker-cm --from-file=scripts/restore-docs-worker.sh + + # - name: Start Restore Docs Job + # run: | + # kubectl apply -f jobs/restore-docs-worker.yaml + # kubectl wait --timeout 10m --for=condition=complete job/restore-docs-worker + + # - name: Delete Restore Docs Job + # run: kubectl delete job restore-docs-worker + + # - name: Delete configmap + # run: kubectl delete cm restore-docs-worker-cm \ No newline at end of file diff --git a/.github/workflows/create-rds-snapshot.yml b/.github/workflows/create-rds-snapshot.yml new file mode 100644 index 0000000..233ad5e --- /dev/null +++ b/.github/workflows/create-rds-snapshot.yml @@ -0,0 +1,60 @@ +name: Alfresco Restore Docs Worker Process + +on: + workflow_dispatch: + inputs: + which_env: + description: Environment where this restore docs process will run + required: true + type: choice + options: + - poc + +permissions: + contents: read + +jobs: + restore-docs-worker: + runs-on: ubuntu-latest + + steps: + - name: Checkout current repo + uses: actions/checkout@v3 + + - name: Configure kubectl + run: | + echo "${{ secrets.KUBE_CERT }}" > ca.crt + kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER} + kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }} + kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE} + kubectl config use-context ${KUBE_CLUSTER} + env: + KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }} + KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }} + + - name: Generate snapshot name + id: snapshot_name + run: echo "snapshot_name=hmpps-delius-alfresco-${{ inputs.which_env }}-$(date +%Y%m%d%H%M%S)" >> $GITHUB_OUTPUT + + - name: Create RDS Snapshot + run: | + + #!/bin/bash + set -xe + + local SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}") + local SERVICE_POD_NAME=$(kubectl get pod -l app=$SERVICE_POD_DEPLOYMENT -o jsonpath="{.items[0].metadata.name}") + + local RDS_INSTANCE_IDENTIFIER=$(kubectl get secrets rds-instance-output 
-o jsonpath='{.data.RDS_INSTANCE_IDENTIFIER}' | base64 -d) + + # Exec into the service pod and execute the script + kubectl exec $SERVICE_POD_NAME -- /bin/sh -c 'aws rds create-db-snapshot --db-instance-identifier $RDS_INSTANCE_IDENTIFIER --db-snapshot-identifier ${{ steps.snapshot_name.outputs.snapshot_name}}' + + # wait for the snapshot to be created + + kubectl exec $SERVICE_POD_NAME -- /bin/sh -c 'aws rds wait db-snapshot-completed --db-snapshot-identifier ${{ steps.snapshot_name.outputs.snapshot_name}}' + + - name: Output Snapshot Name + run: | + echo "Snapshot Name: ${{ steps.snapshot_name.outputs.snapshot_name }}" + \ No newline at end of file diff --git a/jobs/backup-pvc/Chart.yaml b/jobs/backup-pvc/Chart.yaml new file mode 100644 index 0000000..30f8165 --- /dev/null +++ b/jobs/backup-pvc/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +appVersion: 0.1 +version: 0.0.1 +description: Jobs backup PVC +name: backup-pvc diff --git a/jobs/backup-pvc/templates/job.yaml b/jobs/backup-pvc/templates/job.yaml new file mode 100644 index 0000000..cece01d --- /dev/null +++ b/jobs/backup-pvc/templates/job.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: refresh-s3-script +data: + entrypoint.sh: |- + #!/bin/sh + set -xe + + aws configure set default.s3.max_concurrent_requests 2000 + + date=$(date +%Y%m%d%H%M%S) + + aws s3 sync /pvc_mount s3://$DST_BUCKET/snapshot-${date} --delete --no-progress --only-show-errors + + set +x + echo sync of pvc mount directory completed + +{{- range .Values.directories }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: solr-backup-pvc-{{ . | toString | replace "/" "-" }} + labels: + name-prefix: solr-backup-pvc +spec: + template: + spec: + containers: + - name: backup-pvc + image: 754256621582.dkr.ecr.eu-west-2.amazonaws.com/webops/cloud-platform-service-pod:c5f69b4624b956248001fa7c173c89a0556a457e + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 4 + memory: 8Gi + command: + - /bin/entrypoint.sh + env: + - name: DST_BUCKET + valueFrom: + secretKeyRef: + name: s3-bucket-solr-backup-pvc-output + key: BUCKET_NAME + - name: DIR + value: {{ . | quote }} + volumeMounts: + - name: solr-backup-pvc-script + mountPath: /bin/entrypoint.sh + readOnly: true + subPath: entrypoint.sh + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 1001 + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + serviceAccount: hmpps-migration-development + serviceAccountName: hmpps-migration-development + restartPolicy: OnFailure + volumes: + - name: solr-backup-pvc-script + configMap: + name: solr-backup-pvc-script + defaultMode: 0755 + - name: pvc-mount + persistentVolumeClaim: + claimName: # not sure how to get this + backoffLimit: 10 +{{- end }} +... 
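Two notes on the "Restore from Glacier" step in this first patch: 'local' is only valid inside a shell function, so under set -xe the step exits at the first assignment; and the runner-side values (S3_BUCKET_NAME, S3_OBJECT_KEY, JOB_TIER) are never visible inside the single-quoted script handed to kubectl exec (the trailing - "$JOB_TIER" "..." does not turn them into positional parameters of the remote shell). The single if/sleep also waits only once, so the copy back to STANDARD storage can run before the restore has finished. Below is a minimal sketch of the restore/wait/copy portion that works around all three points, assuming the service-pod image provides env and jq (the original script already relies on jq):

    SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}")
    SERVICE_POD_NAME=$(kubectl get pod -l app="$SERVICE_POD_DEPLOYMENT" -o jsonpath="{.items[0].metadata.name}")
    S3_BUCKET_NAME=$(kubectl get secrets s3-bucket-output -o jsonpath='{.data.BUCKET_NAME}' | base64 -d)

    # Pass the values into the remote shell's environment; nothing inside the
    # single quotes then needs to be expanded on the runner.
    kubectl exec "$SERVICE_POD_NAME" -- env \
      S3_BUCKET_NAME="$S3_BUCKET_NAME" S3_OBJECT_KEY="$S3_OBJECT_KEY" JOB_TIER="$JOB_TIER" \
      /bin/sh -c '
    set -e
    aws s3api restore-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" \
      --restore-request "{\"Days\":1,\"GlacierJobParameters\":{\"Tier\":\"$JOB_TIER\"}}"

    # Poll until the Glacier restore has actually finished before re-copying the object.
    while aws s3api head-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" \
        | jq -r ".Restore // empty" | grep -q "ongoing-request=\"true\""; do
      echo "Restore of s3://$S3_BUCKET_NAME/$S3_OBJECT_KEY still in progress, waiting..."
      sleep 20
    done

    aws s3 cp "s3://$S3_BUCKET_NAME/$S3_OBJECT_KEY" "s3://$S3_BUCKET_NAME/$S3_OBJECT_KEY" --storage-class STANDARD
    '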
From 950c516233b83909e19bb9b1c11b9b9d65513673 Mon Sep 17 00:00:00 2001 From: George Taylor Date: Tue, 7 May 2024 23:21:10 +0100 Subject: [PATCH 2/7] Delete create-pv-restore-point.yml --- .github/workflows/create-pv-restore-point.yml | 114 ------------------ 1 file changed, 114 deletions(-) delete mode 100644 .github/workflows/create-pv-restore-point.yml diff --git a/.github/workflows/create-pv-restore-point.yml b/.github/workflows/create-pv-restore-point.yml deleted file mode 100644 index 18a3032..0000000 --- a/.github/workflows/create-pv-restore-point.yml +++ /dev/null @@ -1,114 +0,0 @@ -name: Alfresco Restore Docs Worker Process - -on: - workflow_dispatch: - inputs: - which_env: - description: Environment where this restore docs process will run - required: true - type: choice - options: - - poc - s3_object_key: - description: S3 Object which needs to be restored - required: true - job_tier: - description: Glacier job tier (Expedited, Standard, Bulk) - required: false - default: 'Expedited' - -permissions: - contents: read - -jobs: - restore-docs-worker: - runs-on: ubuntu-latest - - steps: - - name: Checkout current repo - uses: actions/checkout@v3 - - - name: Configure kubectl - run: | - echo "${{ secrets.KUBE_CERT }}" > ca.crt - kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER} - kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }} - kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE} - kubectl config use-context ${KUBE_CLUSTER} - env: - KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }} - KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }} - - - name: Restore from Glacier by executing in the service pod - run: | - - #!/bin/bash - set -xe - - local SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}") - local SERVICE_POD_NAME=$(kubectl get pod -l app=$SERVICE_POD_DEPLOYMENT -o jsonpath="{.items[0].metadata.name}") - - local S3_BUCKET_NAME=$(kubectl get secrets s3-bucket-output -o jsonpath='{.data.BUCKET_NAME}' | base64 -d) - - # Exec into the service pod and execute the script - kubectl exec $SERVICE_POD_NAME -- /bin/sh -c ' - # Delete the delete marker versions - local version_id=$(aws s3api list-object-versions --bucket "$S3_BUCKET_NAME" --prefix "${S3_OBJECT_KEY}" --query "Versions[?IsLatest==\`true\`].[VersionId]" --output text | jq -r ".[0]") - aws s3api delete-object --bucket "$S3_BUCKET_NAME" --key "${S3_OBJECT_KEY}" --version-id "$version_id" - echo "Deleted marker version: $version_id" - - # Restore from Glacier - aws s3api restore-object --bucket "$S3_BUCKET_NAME" --key "${S3_OBJECT_KEY}" --restore-request "{\"Days\":1,\"GlacierJobParameters\":{\"Tier\":\"'$JOB_TIER'\"}}" - - # Wait for restoration to complete - local wait_interval=20 - local restore_status=$(aws s3api head-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" | jq -r '.Restore') - if [[ "$restore_status" == *"ongoing-request=\"true\""* ]]; then - #restore in progress - echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} in progress. Please wait!" - sleep "$wait_interval" - fi - - # Copy object within S3 bucket to update storage class - aws s3 cp "s3://$S3_BUCKET_NAME/${S3_OBJECT_KEY}" "s3://$S3_BUCKET_NAME/${S3_OBJECT_KEY}" --storage-class STANDARD - - echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} task complete." 
- ' - "$JOB_TIER" "${{ github.event.inputs.s3_object_key }}" - env: - JOB_TIER: ${{ github.event.inputs.job_tier }} - S3_OBJECT_KEY: ${{ github.event.inputs.s3_object_key }} - - # restore-docs-worker: - # name: Restore Docs from Glacier - # runs-on: ubuntu-22.04 - # environment: - # name: ${{ github.event.inputs.which_env }} - # steps: - # - name: Check out code - # uses: actions/checkout@v4.1.1 - - # - name: Configure kubectl - # run: | - # echo "${{ secrets.KUBE_CERT }}" > ca.crt - # kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER} - # kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }} - # kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE} - # kubectl config use-context ${KUBE_CLUSTER} - # env: - # KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }} - # KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }} - - # - name: Create ConfigMap using the restore-docs-worker.sh script - # run: | - # kubectl create configmap restore-docs-worker-cm --from-file=scripts/restore-docs-worker.sh - - # - name: Start Restore Docs Job - # run: | - # kubectl apply -f jobs/restore-docs-worker.yaml - # kubectl wait --timeout 10m --for=condition=complete job/restore-docs-worker - - # - name: Delete Restore Docs Job - # run: kubectl delete job restore-docs-worker - - # - name: Delete configmap - # run: kubectl delete cm restore-docs-worker-cm \ No newline at end of file From 8bbe321d4da5b3d612de126859f8e9ec02b0915f Mon Sep 17 00:00:00 2001 From: George Taylor Date: Wed, 8 May 2024 14:57:13 +0100 Subject: [PATCH 3/7] compress files to a tar.gz and copy to dedicated bucket --- jobs/backup-pvc/templates/job.yaml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/jobs/backup-pvc/templates/job.yaml b/jobs/backup-pvc/templates/job.yaml index cece01d..57ff350 100644 --- a/jobs/backup-pvc/templates/job.yaml +++ b/jobs/backup-pvc/templates/job.yaml @@ -12,10 +12,14 @@ data: date=$(date +%Y%m%d%H%M%S) - aws s3 sync /pvc_mount s3://$DST_BUCKET/snapshot-${date} --delete --no-progress --only-show-errors + cd /pvc-mount/alfresco-content-services/solr-data + + tar -czf /tmp/solr-data-${date}.tar.gz alfresco alfrescoModels archive + + aws s3 cp /tmp/solr-data-${date}.tar.gz s3://$DST_BUCKET/solr-data-${date}.tar.gz --delete --no-progress --only-show-errors set +x - echo sync of pvc mount directory completed + echo backup of pvc mount directory completed {{- range .Values.directories }} --- @@ -72,7 +76,6 @@ spec: defaultMode: 0755 - name: pvc-mount persistentVolumeClaim: - claimName: # not sure how to get this + claimName: alfresco-content-services-alfresco-search-solr-claim backoffLimit: 10 {{- end }} -... 
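Two observations on the backup Job as of this patch: the pvc-mount volume is declared under volumes: but never added to the container's volumeMounts:, so /pvc-mount/alfresco-content-services/solr-data would not exist inside the pod until a corresponding mount (e.g. mountPath: /pvc-mount) is added; and --delete is a flag of aws s3 sync, not aws s3 cp, so the upload as written would fail (the next patch drops it). Writing the tarball to /tmp also requires enough ephemeral storage for the whole archive; a possible alternative, sketched below, streams it straight to the bucket. The --expected-size value is only an illustrative ~100 GiB upper bound so the CLI can size the multipart upload for a stream of unknown length.

    cd /pvc-mount/alfresco-content-services/solr-data
    date=$(date +%Y%m%d%H%M%S)

    # Stream the archive to S3 instead of staging it in /tmp; "-" makes tar write
    # to stdout and aws s3 cp read from stdin.
    tar -czf - alfresco alfrescoModels archive \
      | aws s3 cp - "s3://$DST_BUCKET/solr-data-${date}.tar.gz" \
          --expected-size 107374182400 --no-progress --only-show-errors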
From a9aacf238a8e3ab534734fed3618d456d30bbd5c Mon Sep 17 00:00:00 2001 From: George Taylor Date: Wed, 8 May 2024 14:58:36 +0100 Subject: [PATCH 4/7] correct s3 bucket secret key --- jobs/backup-pvc/templates/job.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jobs/backup-pvc/templates/job.yaml b/jobs/backup-pvc/templates/job.yaml index 57ff350..49b3ec7 100644 --- a/jobs/backup-pvc/templates/job.yaml +++ b/jobs/backup-pvc/templates/job.yaml @@ -16,7 +16,7 @@ data: tar -czf /tmp/solr-data-${date}.tar.gz alfresco alfrescoModels archive - aws s3 cp /tmp/solr-data-${date}.tar.gz s3://$DST_BUCKET/solr-data-${date}.tar.gz --delete --no-progress --only-show-errors + aws s3 cp /tmp/solr-data-${date}.tar.gz s3://$DST_BUCKET/solr-data-${date}.tar.gz --no-progress --only-show-errors set +x echo backup of pvc mount directory completed @@ -46,7 +46,7 @@ spec: - name: DST_BUCKET valueFrom: secretKeyRef: - name: s3-bucket-solr-backup-pvc-output + name: s3-backups-bucket-output key: BUCKET_NAME - name: DIR value: {{ . | quote }} From 21d35d6ee5b1f64c59b1db858c931aa8189b0a87 Mon Sep 17 00:00:00 2001 From: George Taylor Date: Wed, 8 May 2024 23:59:51 +0100 Subject: [PATCH 5/7] remove yaml loop --- jobs/backup-pvc/templates/job.yaml | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/jobs/backup-pvc/templates/job.yaml b/jobs/backup-pvc/templates/job.yaml index 49b3ec7..cc0be0f 100644 --- a/jobs/backup-pvc/templates/job.yaml +++ b/jobs/backup-pvc/templates/job.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: refresh-s3-script + name: backup-pvc-script data: entrypoint.sh: |- #!/bin/sh @@ -21,12 +21,11 @@ data: set +x echo backup of pvc mount directory completed -{{- range .Values.directories }} --- apiVersion: batch/v1 kind: Job metadata: - name: solr-backup-pvc-{{ . | toString | replace "/" "-" }} + name: solr-backup-pvc labels: name-prefix: solr-backup-pvc spec: @@ -48,10 +47,8 @@ spec: secretKeyRef: name: s3-backups-bucket-output key: BUCKET_NAME - - name: DIR - value: {{ . 
| quote }} volumeMounts: - - name: solr-backup-pvc-script + - name: backup-pvc-script mountPath: /bin/entrypoint.sh readOnly: true subPath: entrypoint.sh @@ -66,16 +63,16 @@ spec: - ALL seccompProfile: type: RuntimeDefault - serviceAccount: hmpps-migration-development - serviceAccountName: hmpps-migration-development + serviceAccount: hmpps-migration-poc + serviceAccountName: hmpps-migration-poc restartPolicy: OnFailure volumes: - - name: solr-backup-pvc-script + - name: backup-pvc-script configMap: - name: solr-backup-pvc-script + name: backup-pvc-script defaultMode: 0755 - name: pvc-mount persistentVolumeClaim: claimName: alfresco-content-services-alfresco-search-solr-claim + backoffLimit: 10 -{{- end }} From 3cc18c9ccd2b031cd86897eb645d32314b334279 Mon Sep 17 00:00:00 2001 From: George Taylor Date: Tue, 14 May 2024 16:27:30 +0100 Subject: [PATCH 6/7] =?UTF-8?q?=F0=9F=94=A5=20Remove=20solr=20backup=20job?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- jobs/backup-pvc/Chart.yaml | 5 -- jobs/backup-pvc/templates/job.yaml | 78 ------------------------------ 2 files changed, 83 deletions(-) delete mode 100644 jobs/backup-pvc/Chart.yaml delete mode 100644 jobs/backup-pvc/templates/job.yaml diff --git a/jobs/backup-pvc/Chart.yaml b/jobs/backup-pvc/Chart.yaml deleted file mode 100644 index 30f8165..0000000 --- a/jobs/backup-pvc/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -appVersion: 0.1 -version: 0.0.1 -description: Jobs backup PVC -name: backup-pvc diff --git a/jobs/backup-pvc/templates/job.yaml b/jobs/backup-pvc/templates/job.yaml deleted file mode 100644 index cc0be0f..0000000 --- a/jobs/backup-pvc/templates/job.yaml +++ /dev/null @@ -1,78 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: backup-pvc-script -data: - entrypoint.sh: |- - #!/bin/sh - set -xe - - aws configure set default.s3.max_concurrent_requests 2000 - - date=$(date +%Y%m%d%H%M%S) - - cd /pvc-mount/alfresco-content-services/solr-data - - tar -czf /tmp/solr-data-${date}.tar.gz alfresco alfrescoModels archive - - aws s3 cp /tmp/solr-data-${date}.tar.gz s3://$DST_BUCKET/solr-data-${date}.tar.gz --no-progress --only-show-errors - - set +x - echo backup of pvc mount directory completed - ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: solr-backup-pvc - labels: - name-prefix: solr-backup-pvc -spec: - template: - spec: - containers: - - name: backup-pvc - image: 754256621582.dkr.ecr.eu-west-2.amazonaws.com/webops/cloud-platform-service-pod:c5f69b4624b956248001fa7c173c89a0556a457e - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 4 - memory: 8Gi - command: - - /bin/entrypoint.sh - env: - - name: DST_BUCKET - valueFrom: - secretKeyRef: - name: s3-backups-bucket-output - key: BUCKET_NAME - volumeMounts: - - name: backup-pvc-script - mountPath: /bin/entrypoint.sh - readOnly: true - subPath: entrypoint.sh - securityContext: - allowPrivilegeEscalation: false - privileged: false - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 1001 - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - serviceAccount: hmpps-migration-poc - serviceAccountName: hmpps-migration-poc - restartPolicy: OnFailure - volumes: - - name: backup-pvc-script - configMap: - name: backup-pvc-script - defaultMode: 0755 - - name: pvc-mount - persistentVolumeClaim: - claimName: alfresco-content-services-alfresco-search-solr-claim - - backoffLimit: 10 From 90f2c995adf04a0aa1c3c87a5f22d5da4b11d2fd Mon Sep 17 00:00:00 2001 From: George Taylor 
Date: Wed, 15 May 2024 11:01:10 +0100 Subject: [PATCH 7/7] typo --- .github/workflows/create-rds-snapshot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/create-rds-snapshot.yml b/.github/workflows/create-rds-snapshot.yml index 233ad5e..77bcc5d 100644 --- a/.github/workflows/create-rds-snapshot.yml +++ b/.github/workflows/create-rds-snapshot.yml @@ -56,5 +56,5 @@ jobs: - name: Output Snapshot Name run: | - echo "Snapshot Name: ${{ steps.snapshot_name.outputs.snapshot_name }}" + echo "Snapshot Name: ${{ steps.snapshot_name.outputs.snapshot_name }}" \ No newline at end of file
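The same local-outside-a-function problem applies to the snapshot step that survives in create-rds-snapshot.yml, and $RDS_INSTANCE_IDENTIFIER inside the single-quoted sh -c string is never substituted: single quotes stop the runner from expanding it, and it is not set in the pod's environment. Since nothing in these commands needs a remote shell, one option is to let the runner expand the arguments and pass them straight to the AWS CLI in the pod; a sketch, assuming the service pod's IAM role may create and describe DB snapshots:

    SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}")
    SERVICE_POD_NAME=$(kubectl get pod -l app="$SERVICE_POD_DEPLOYMENT" -o jsonpath="{.items[0].metadata.name}")
    RDS_INSTANCE_IDENTIFIER=$(kubectl get secrets rds-instance-output -o jsonpath='{.data.RDS_INSTANCE_IDENTIFIER}' | base64 -d)
    SNAPSHOT_NAME="${{ steps.snapshot_name.outputs.snapshot_name }}"  # GitHub expression, resolved before the shell runs

    # The runner expands the quoted variables; kubectl passes finished arguments
    # to the aws CLI in the service pod, so no remote shell quoting is involved.
    kubectl exec "$SERVICE_POD_NAME" -- aws rds create-db-snapshot \
      --db-instance-identifier "$RDS_INSTANCE_IDENTIFIER" \
      --db-snapshot-identifier "$SNAPSHOT_NAME"

    kubectl exec "$SERVICE_POD_NAME" -- aws rds wait db-snapshot-completed \
      --db-snapshot-identifier "$SNAPSHOT_NAME"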