Nit 1120 alfresco recover deleted documents #44

Merged
12 commits, merged on Apr 24, 2024
114 changes: 114 additions & 0 deletions .github/workflows/restore-docs.yaml
@@ -0,0 +1,114 @@
name: Alfresco Restore Docs Worker Process

on:
  workflow_dispatch:
    inputs:
      which_env:
        description: Environment where this restore docs process will run
        required: true
        type: choice
        options:
          - poc
      s3_object_key:
        description: S3 object which needs to be restored
        required: true
      job_tier:
        description: Glacier job tier (Expedited, Standard, Bulk)
        required: false
        default: 'Expedited'

permissions:
  contents: read

jobs:
  restore-docs-worker:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout current repo
        uses: actions/checkout@v3

      - name: Configure kubectl
        run: |
          echo "${{ secrets.KUBE_CERT }}" > ca.crt
          kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER}
          kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }}
          kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE}
          kubectl config use-context ${KUBE_CLUSTER}
        env:
          KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }}
          KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }}

      - name: Restore from Glacier by executing in the service pod
        run: |
          set -xe

          SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}")
          SERVICE_POD_NAME=$(kubectl get pod -l app=$SERVICE_POD_DEPLOYMENT -o jsonpath="{.items[0].metadata.name}")

          S3_BUCKET_NAME=$(kubectl get secrets s3-bucket-output -o jsonpath='{.data.BUCKET_NAME}' | base64 -d)

          # Exec into the service pod and run the restore steps. The bucket name, object key
          # and job tier are passed as positional parameters so they are available inside
          # the single-quoted script.
          kubectl exec $SERVICE_POD_NAME -- /bin/bash -c '
            S3_BUCKET_NAME="$1"
            S3_OBJECT_KEY="$2"
            JOB_TIER="$3"

            # Delete the latest delete marker so the object becomes visible again
            version_id=$(aws s3api list-object-versions --bucket "$S3_BUCKET_NAME" --prefix "$S3_OBJECT_KEY" --query "DeleteMarkers[?IsLatest==\`true\`].VersionId" --output json | jq -r ".[0]")
            aws s3api delete-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" --version-id "$version_id"
            echo "Deleted marker version: $version_id"

            # Restore from Glacier
            aws s3api restore-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" --restore-request "{\"Days\":1,\"GlacierJobParameters\":{\"Tier\":\"$JOB_TIER\"}}"

            # Wait for restoration to complete
            wait_interval=20
            restore_status=$(aws s3api head-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" | jq -r ".Restore")
            if [[ "$restore_status" == *"ongoing-request=\"true\""* ]]; then
              # restore in progress
              echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} in progress. Please wait!"
              sleep "$wait_interval"
            fi

            # Copy object within S3 bucket to update storage class
            aws s3 cp "s3://$S3_BUCKET_NAME/${S3_OBJECT_KEY}" "s3://$S3_BUCKET_NAME/${S3_OBJECT_KEY}" --storage-class STANDARD

            echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} task complete."
          ' restore-docs "$S3_BUCKET_NAME" "$S3_OBJECT_KEY" "$JOB_TIER"
        env:
          JOB_TIER: ${{ github.event.inputs.job_tier }}
          S3_OBJECT_KEY: ${{ github.event.inputs.s3_object_key }}

  # restore-docs-worker:
  #   name: Restore Docs from Glacier
  #   runs-on: ubuntu-22.04
  #   environment:
  #     name: ${{ github.event.inputs.which_env }}
  #   steps:
  #     - name: Check out code
  #       uses: actions/[email protected]

  #     - name: Configure kubectl
  #       run: |
  #         echo "${{ secrets.KUBE_CERT }}" > ca.crt
  #         kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER}
  #         kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }}
  #         kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE}
  #         kubectl config use-context ${KUBE_CLUSTER}
  #       env:
  #         KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }}
  #         KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }}

  #     - name: Create ConfigMap using the restore-docs-worker.sh script
  #       run: |
  #         kubectl create configmap restore-docs-worker-cm --from-file=scripts/restore-docs-worker.sh

  #     - name: Start Restore Docs Job
  #       run: |
  #         kubectl apply -f jobs/restore-docs-worker.yaml
  #         kubectl wait --timeout 10m --for=condition=complete job/restore-docs-worker

  #     - name: Delete Restore Docs Job
  #       run: kubectl delete job restore-docs-worker

  #     - name: Delete configmap
  #       run: kubectl delete cm restore-docs-worker-cm
43 changes: 43 additions & 0 deletions jobs/restore-docs-worker.yaml
@@ -0,0 +1,43 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: restore-docs-worker
spec:
  template:
    spec:
      containers:
        - name: restore-docs-worker
          image: 754256621582.dkr.ecr.eu-west-2.amazonaws.com/webops/cloud-platform-service-pod:c5f69b4624b956248001fa7c173c89a0556a457e
          imagePullPolicy: IfNotPresent
          command: ["/bin/bash", "/scripts/restore-docs-worker.sh"]
          env:
            - name: BUCKET_NAME
              valueFrom:
                secretKeyRef:
                  name: s3-bucket-output
                  key: BUCKET_NAME
            - name: OBJECT_KEY
              valueFrom:
                secretKeyRef:
                  name: s3-bucket-output-poc
                  key: BUCKET_NAME
            - name: AWS_REGION
              value: "eu-west-2"
          volumeMounts:
            - name: restore-docs-worker-volume
              mountPath: /scripts
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
            readOnlyRootFilesystem: false
            runAsNonRoot: true
            runAsUser: 1001
      volumes:
        - name: restore-docs-worker-volume
          configMap:
            name: restore-docs-worker-cm
            defaultMode: 0755
      serviceAccount: hmpps-migration-development
      serviceAccountName: hmpps-migration-development
      restartPolicy: Never
  backoffLimit: 0
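
A minimal sketch of how this Job could be exercised by hand, mirroring the commented-out workflow steps above; it assumes kubectl is already pointed at the correct cluster and namespace:

    # Publish the script as a ConfigMap, run the Job, wait for completion, then clean up.
    kubectl create configmap restore-docs-worker-cm --from-file=scripts/restore-docs-worker.sh
    kubectl apply -f jobs/restore-docs-worker.yaml
    kubectl wait --timeout 10m --for=condition=complete job/restore-docs-worker
    kubectl logs job/restore-docs-worker   # inspect the restore output
    kubectl delete job restore-docs-worker
    kubectl delete cm restore-docs-worker-cm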
72 changes: 72 additions & 0 deletions scripts/restore-docs-worker.sh
@@ -0,0 +1,72 @@
#!/bin/bash

restore_from_glacier() {
  local S3_BUCKET_NAME=$1
  local S3_OBJECT_KEY=$2
  local JOB_TIER=${3:-Expedited}

  aws s3api restore-object \
    --bucket "$S3_BUCKET_NAME" \
    --key "$S3_OBJECT_KEY" \
    --restore-request '{"Days":1,"GlacierJobParameters":{"Tier":"'"$JOB_TIER"'"}}'
}

check_restore_status() {
  local S3_BUCKET_NAME=$1
  local S3_OBJECT_KEY=$2

  local restore_status=$(aws s3api head-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" | jq -r '.Restore')
  if [[ "$restore_status" == *"ongoing-request=\"true\""* ]]; then
    return 0 # restore in progress
  else
    return 1 # restore complete
  fi
}

copy_s3_object() {
  local S3_BUCKET_NAME=$1
  local S3_OBJECT_KEY=$2

  # Copy object within S3 bucket to update storage class
  aws s3 cp "s3://$S3_BUCKET_NAME/$S3_OBJECT_KEY" "s3://$S3_BUCKET_NAME/$S3_OBJECT_KEY" --storage-class STANDARD
}

lambda_handler() {
  local S3_BUCKET_NAME=$BUCKET_NAME
  local S3_OBJECT_KEY=$OBJECT_KEY

  if [[ -z "$S3_BUCKET_NAME" || -z "$S3_OBJECT_KEY" ]]; then
    echo "Please provide bucket name and object key"
    exit 1
  fi

  local object_versions=$(aws s3api list-object-versions --bucket "$S3_BUCKET_NAME" --prefix "$S3_OBJECT_KEY")
  if [[ -z "$object_versions" ]]; then
    echo "Object not found in bucket"
    exit 1
  fi

  # Remove the latest delete marker (if any) so the object becomes visible again
  local delete_markers=$(jq -r '.DeleteMarkers' <<< "$object_versions")
  if [[ -n "$delete_markers" && "$delete_markers" != "null" ]]; then
    local version_id=$(jq -r '.[0].VersionId' <<< "$delete_markers")
    aws s3api delete-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" --version-id "$version_id"
    echo "Deleted marker version: $version_id"
  fi

  # Restore object from Glacier
  restore_from_glacier "$S3_BUCKET_NAME" "$S3_OBJECT_KEY"

  # Wait for restoration to complete
  local wait_interval=20
  while check_restore_status "$S3_BUCKET_NAME" "$S3_OBJECT_KEY"; do
    echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} in progress. Please wait!"
    sleep "$wait_interval"
  done

  # Copy object within S3 bucket to update storage class
  copy_s3_object "$S3_BUCKET_NAME" "$S3_OBJECT_KEY"

  echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} task complete."
}

lambda_handler
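
For local testing, the handler could be driven directly by exporting the two variables it reads; the bucket and key below are placeholders, and working AWS credentials are assumed:

    # Placeholder values for a dry run against a test bucket.
    export BUCKET_NAME="example-alfresco-bucket"
    export OBJECT_KEY="path/to/deleted-document.pdf"
    export AWS_REGION="eu-west-2"
    bash scripts/restore-docs-worker.sh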