Alfresco Restore Docs Worker Process #6
Workflow file for this run

name: Alfresco Restore Docs Worker Process

on:
  workflow_dispatch:
    inputs:
      which_env:
        description: Environment where this restore docs process will run
        required: true
        type: choice
        options:
          - poc
      s3_object_key:
        description: S3 object key which needs to be restored
        required: true
      job_tier:
        description: Glacier retrieval tier (Expedited, Standard, Bulk)
        required: false
        default: 'Expedited'

permissions:
  contents: read

jobs:
  restore-docs-worker:
    runs-on: ubuntu-latest
    environment:
      name: ${{ github.event.inputs.which_env }}
    steps:
      - name: Checkout current repo
        uses: actions/checkout@v3

      - name: Configure kubectl
        run: |
          echo "${{ secrets.KUBE_CERT }}" > ca.crt
          kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER}
          kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }}
          kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE}
          kubectl config use-context ${KUBE_CLUSTER}
        env:
          KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }}
          KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }}
      - name: Restore from Glacier by executing in the service pod
        run: |
          set -xe
          SERVICE_POD_DEPLOYMENT=$(kubectl get deployment -l app=service-pod -o jsonpath="{.items[0].metadata.name}")
          SERVICE_POD_NAME=$(kubectl get pod -l app=$SERVICE_POD_DEPLOYMENT -o jsonpath="{.items[0].metadata.name}")
          S3_BUCKET_NAME=$(kubectl get secrets s3-bucket-output -o jsonpath='{.data.BUCKET_NAME}' | base64 -d)
          # Exec into the service pod and run the restore script. The variables are
          # passed through `env` so they are visible inside the pod; the single-quoted
          # script itself is deliberately not expanded on the runner.
          kubectl exec "$SERVICE_POD_NAME" -- env \
            S3_BUCKET_NAME="$S3_BUCKET_NAME" \
            S3_OBJECT_KEY="$S3_OBJECT_KEY" \
            JOB_TIER="$JOB_TIER" \
            /bin/sh -c '
          set -e
          # Remove the latest delete marker so the archived version becomes current again
          version_id=$(aws s3api list-object-versions --bucket "$S3_BUCKET_NAME" --prefix "$S3_OBJECT_KEY" --query "DeleteMarkers[?IsLatest==\`true\`].[VersionId]" --output text | head -n 1)
          aws s3api delete-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" --version-id "$version_id"
          echo "Deleted delete marker version: $version_id"
          # Restore the archived version from Glacier
          aws s3api restore-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" --restore-request "{\"Days\":1,\"GlacierJobParameters\":{\"Tier\":\"$JOB_TIER\"}}"
          # Poll until the restoration completes
          wait_interval=20
          while true; do
            restore_status=$(aws s3api head-object --bucket "$S3_BUCKET_NAME" --key "$S3_OBJECT_KEY" | jq -r ".Restore")
            case "$restore_status" in
              *"ongoing-request=\"true\""*)
                echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} in progress. Please wait!"
                sleep "$wait_interval"
                ;;
              *)
                break
                ;;
            esac
          done
          # Copy the object onto itself to move it back to the STANDARD storage class
          aws s3 cp "s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY}" "s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY}" --storage-class STANDARD
          echo "Restore for object s3://${S3_BUCKET_NAME}/${S3_OBJECT_KEY} task complete."
          '
        env:
          JOB_TIER: ${{ github.event.inputs.job_tier }}
          S3_OBJECT_KEY: ${{ github.event.inputs.s3_object_key }}
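
  # A manual dispatch example (a sketch, assuming the GitHub CLI is installed
  # and authenticated against this repository; the object key is illustrative):
  #   gh workflow run "Alfresco Restore Docs Worker Process" \
  #     -f which_env=poc \
  #     -f s3_object_key=some/prefix/document.pdf \
  #     -f job_tier=Standard
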
  # Alternative implementation: run the restore as a Kubernetes Job
  # restore-docs-worker:
  #   name: Restore Docs from Glacier
  #   runs-on: ubuntu-22.04
  #   environment:
  #     name: ${{ github.event.inputs.which_env }}
  #   steps:
  #     - name: Check out code
  #       uses: actions/checkout@v3
  #     - name: Configure kubectl
  #       run: |
  #         echo "${{ secrets.KUBE_CERT }}" > ca.crt
  #         kubectl config set-cluster ${KUBE_CLUSTER} --certificate-authority=./ca.crt --server=https://${KUBE_CLUSTER}
  #         kubectl config set-credentials deploy-user --token=${{ secrets.KUBE_TOKEN }}
  #         kubectl config set-context ${KUBE_CLUSTER} --cluster=${KUBE_CLUSTER} --user=deploy-user --namespace=${KUBE_NAMESPACE}
  #         kubectl config use-context ${KUBE_CLUSTER}
  #       env:
  #         KUBE_NAMESPACE: ${{ secrets.KUBE_NAMESPACE }}
  #         KUBE_CLUSTER: ${{ secrets.KUBE_CLUSTER }}
  #     - name: Create ConfigMap using the restore-docs-worker.sh script
  #       run: |
  #         kubectl create configmap restore-docs-worker-cm --from-file=scripts/restore-docs-worker.sh
  #     - name: Start Restore Docs Job
  #       run: |
  #         kubectl apply -f jobs/restore-docs-worker.yaml
  #         kubectl wait --timeout 10m --for=condition=complete job/restore-docs-worker
  #     - name: Delete Restore Docs Job
  #       run: kubectl delete job restore-docs-worker
  #     - name: Delete configmap
  #       run: kubectl delete cm restore-docs-worker-cm
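
The commented-out variant expects two files that are not shown in this run: scripts/restore-docs-worker.sh and jobs/restore-docs-worker.yaml. As a rough sketch only, a Job manifest along these lines would mount the ConfigMap created above and run the script; the image, volume names, and secret wiring here are assumptions, not the repository's actual manifest:

# jobs/restore-docs-worker.yaml (hypothetical sketch)
apiVersion: batch/v1
kind: Job
metadata:
  name: restore-docs-worker
spec:
  backoffLimit: 0
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: restore-docs-worker
          image: amazon/aws-cli:latest        # assumed; needs the aws CLI (and jq if the script polls with it)
          command: ["/bin/sh", "/scripts/restore-docs-worker.sh"]
          envFrom:
            - secretRef:
                name: s3-bucket-output        # provides BUCKET_NAME, as in the inline job above
          volumeMounts:
            - name: restore-script
              mountPath: /scripts
      volumes:
        - name: restore-script
          configMap:
            name: restore-docs-worker-cm      # created by the "Create ConfigMap" step

The Job name matches the kubectl wait --for=condition=complete job/restore-docs-worker call; the S3 object key would still need to be injected, for example as an environment variable templated into the manifest before kubectl apply.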