Skip to content

Commit

Permalink
velero should never use the disable s3 codepath without RWX volumes (#…
Browse files Browse the repository at this point in the history
…4808)

* require the rook-cephfs or longhorn storageclasses to use pvc-backed velero

* airgap k8s version
  • Loading branch information
laverya authored Sep 7, 2023
1 parent ada5f03 commit 4676715
Show file tree
Hide file tree
Showing 3 changed files with 37 additions and 35 deletions.
35 changes: 18 additions & 17 deletions addons/velero/1.11.1/install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ function velero() {

kubectl label -n default --overwrite service/kubernetes velero.io/exclude-from-backup=true

# Bail if the migrationn fails, preventing the original object store from being deleted
# Bail if the migration fails, preventing the original object store from being deleted
if velero_did_migrate_from_object_store; then
logWarn "Velero will migrate from object store to pvc"
if ! try_5m velero_pvc_migrated ; then
Expand Down Expand Up @@ -95,7 +95,7 @@ function velero_join() {
}

function velero_host_init() {
velero_install_nfs_utils_if_missing
velero_install_nfs_utils_if_missing
}

function velero_install_nfs_utils_if_missing() {
Expand Down Expand Up @@ -142,8 +142,9 @@ function velero_install() {
local bslArgs="--no-default-backup-location"
if ! kubernetes_resource_exists "$VELERO_NAMESPACE" backupstoragelocation default; then

# Only use the PVC backup location for new installs where disableS3 is set to TRUE
if [ "$KOTSADM_DISABLE_S3" == 1 ] ; then
# Only use the PVC backup location for new installs where disableS3 is set to TRUE and
# there is a RWX storage class available (rook-cephfs or longhorn)
if [ "$KOTSADM_DISABLE_S3" == 1 ] && { kubectl get storageclass | grep "longhorn" || kubectl get storageclass | grep "rook-cephfs" ; } ; then
bslArgs="--provider replicated.com/pvc --bucket velero-internal-snapshots --backup-location-config storageSize=${VELERO_PVC_SIZE},resticRepoPrefix=/var/velero-local-volume-provider/velero-internal-snapshots/restic"
elif object_store_exists; then
local ip=$($DIR/bin/kurl netutil format-ip-address $OBJECT_STORE_CLUSTER_IP)
Expand All @@ -166,7 +167,7 @@ function velero_install() {
--namespace $VELERO_NAMESPACE \
--plugins velero/velero-plugin-for-aws:v1.7.1,velero/velero-plugin-for-gcp:v1.7.1,velero/velero-plugin-for-microsoft-azure:v1.7.1,replicated/local-volume-provider:v0.5.4,"$KURL_UTIL_IMAGE" \
--use-volume-snapshots=false \
--dry-run -o yaml > "$dst/velero.yaml"
--dry-run -o yaml > "$dst/velero.yaml"

rm -f velero-credentials
}
Expand All @@ -176,15 +177,15 @@ function velero_already_applied() {
local src="$DIR/addons/velero/$VELERO_VERSION"
local dst="$DIR/kustomize/velero"

# If we need to migrate, we're going to need to basically reconstruct the original install
# If we need to migrate, we're going to need to basically reconstruct the original install
# underneath the migration
if velero_should_migrate_from_object_store; then

render_yaml_file "$src/tmpl-kustomization.yaml" > "$dst/kustomization.yaml"

determine_velero_pvc_size

velero_binary
velero_binary
velero_install "$src" "$dst"
velero_patch_node_agent_privilege "$src" "$dst"
velero_patch_args "$src" "$dst"
Expand Down Expand Up @@ -225,7 +226,7 @@ function velero_already_applied() {

# The --secret-file flag should be used so that the generated velero deployment uses the
# cloud-credentials secret. Use the contents of that secret if it exists to avoid overwriting
# any changes.
# any changes.
function velero_credentials() {
if kubernetes_resource_exists "$VELERO_NAMESPACE" secret cloud-credentials; then
kubectl -n velero get secret cloud-credentials -ojsonpath='{ .data.cloud }' | base64 -d > velero-credentials
Expand Down Expand Up @@ -334,7 +335,7 @@ function velero_patch_http_proxy() {
fi
}

# If this cluster is used to restore a snapshot taken on a cluster where Rook or OpenEBS was the
# If this cluster is used to restore a snapshot taken on a cluster where Rook or OpenEBS was the
# default storage provisioner, the storageClassName on PVCs will need to be changed from "default"
# to "longhorn" by velero
# https://velero.io/docs/v1.6/restore-reference/#changing-pvpvc-storage-classes
Expand Down Expand Up @@ -362,18 +363,18 @@ EOF

function velero_should_migrate_from_object_store() {
# If KOTSADM_DISABLE_S3 is set, force the migration
if [ "$KOTSADM_DISABLE_S3" != 1 ]; then
if [ "$KOTSADM_DISABLE_S3" != 1 ]; then
return 1
fi

# if the PVC already exists, we've already migrated
if kubernetes_resource_exists "${VELERO_NAMESPACE}" pvc velero-internal-snapshots; then
if kubernetes_resource_exists "${VELERO_NAMESPACE}" pvc velero-internal-snapshots; then
return 1
fi

# if an object store isn't installed don't migrate
# TODO (dans): this doesn't support minio in a non-standard namespace
if (! kubernetes_resource_exists rook-ceph deployment rook-ceph-rgw-rook-ceph-store-a) && (! kubernetes_resource_exists minio deployment minio); then
if (! kubernetes_resource_exists rook-ceph deployment rook-ceph-rgw-rook-ceph-store-a) && (! kubernetes_resource_exists minio deployment minio); then
return 1
fi

Expand All @@ -386,7 +387,7 @@ function velero_should_migrate_from_object_store() {
}

function velero_did_migrate_from_object_store() {

# If KOTSADM_DISABLE_S3 is set, force the migration
if [ -f "$DIR/kustomize/velero/kustomization.yaml" ] && cat "$DIR/kustomize/velero/kustomization.yaml" | grep -q "s3-migration-deployment-patch.yaml"; then
return 0
Expand All @@ -401,12 +402,12 @@ function velero_migrate_from_object_store() {
export VELERO_S3_HOST=
export VELERO_S3_ACCESS_KEY_ID=
export VELERO_S3_ACCESS_KEY_SECRET=
if kubernetes_resource_exists rook-ceph deployment rook-ceph-rgw-rook-ceph-store-a; then
if kubernetes_resource_exists rook-ceph deployment rook-ceph-rgw-rook-ceph-store-a; then
echo "Previous installation of Rook Ceph detected."
VELERO_S3_HOST="rook-ceph-rgw-rook-ceph-store.rook-ceph"
VELERO_S3_ACCESS_KEY_ID=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep AccessKey | head -1 | awk '{print $2}' | base64 --decode)
VELERO_S3_ACCESS_KEY_SECRET=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep SecretKey | head -1 | awk '{print $2}' | base64 --decode)
else
else
echo "Previous installation of Minio detected."
VELERO_S3_HOST="minio.minio"
VELERO_S3_ACCESS_KEY_ID=$(kubectl -n minio get secret minio-credentials -ojsonpath='{ .data.MINIO_ACCESS_KEY }' | base64 --decode)
Expand All @@ -432,8 +433,8 @@ function velero_migrate_from_object_store() {
insert_resources "$dst/kustomization.yaml" s3-migration-bsl.yaml
}

# add patches for the velero and node-agent to the current kustomization file that setup the PVC setup like the
# velero LVP plugin requires
# add patches for the velero and node-agent to the current kustomization file that setup the PVC setup like the
# velero LVP plugin requires
function velero_patch_internal_pvc_snapshots() {
local src="$1"
local dst="$2"
Expand Down
35 changes: 18 additions & 17 deletions addons/velero/template/base/install.tmpl.sh
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ function velero() {

kubectl label -n default --overwrite service/kubernetes velero.io/exclude-from-backup=true

# Bail if the migrationn fails, preventing the original object store from being deleted
# Bail if the migration fails, preventing the original object store from being deleted
if velero_did_migrate_from_object_store; then
logWarn "Velero will migrate from object store to pvc"
if ! try_5m velero_pvc_migrated ; then
Expand Down Expand Up @@ -95,7 +95,7 @@ function velero_join() {
}

function velero_host_init() {
velero_install_nfs_utils_if_missing
velero_install_nfs_utils_if_missing
}

function velero_install_nfs_utils_if_missing() {
Expand Down Expand Up @@ -142,8 +142,9 @@ function velero_install() {
local bslArgs="--no-default-backup-location"
if ! kubernetes_resource_exists "$VELERO_NAMESPACE" backupstoragelocation default; then

# Only use the PVC backup location for new installs where disableS3 is set to TRUE
if [ "$KOTSADM_DISABLE_S3" == 1 ] ; then
# Only use the PVC backup location for new installs where disableS3 is set to TRUE and
# there is a RWX storage class available (rook-cephfs or longhorn)
if [ "$KOTSADM_DISABLE_S3" == 1 ] && { kubectl get storageclass | grep "longhorn" || kubectl get storageclass | grep "rook-cephfs" ; } ; then
bslArgs="--provider replicated.com/pvc --bucket velero-internal-snapshots --backup-location-config storageSize=${VELERO_PVC_SIZE},resticRepoPrefix=/var/velero-local-volume-provider/velero-internal-snapshots/restic"
elif object_store_exists; then
local ip=$($DIR/bin/kurl netutil format-ip-address $OBJECT_STORE_CLUSTER_IP)
Expand All @@ -166,7 +167,7 @@ function velero_install() {
--namespace $VELERO_NAMESPACE \
--plugins velero/velero-plugin-for-aws:v__AWS_PLUGIN_VERSION__,velero/velero-plugin-for-gcp:v__GCP_PLUGIN_VERSION__,velero/velero-plugin-for-microsoft-azure:v__AZURE_PLUGIN_VERSION__,replicated/local-volume-provider:v__LOCAL_VOLUME_PROVIDER_VERSION__,"$KURL_UTIL_IMAGE" \
--use-volume-snapshots=false \
--dry-run -o yaml > "$dst/velero.yaml"
--dry-run -o yaml > "$dst/velero.yaml"

rm -f velero-credentials
}
Expand All @@ -176,15 +177,15 @@ function velero_already_applied() {
local src="$DIR/addons/velero/$VELERO_VERSION"
local dst="$DIR/kustomize/velero"

# If we need to migrate, we're going to need to basically reconstruct the original install
# If we need to migrate, we're going to need to basically reconstruct the original install
# underneath the migration
if velero_should_migrate_from_object_store; then

render_yaml_file "$src/tmpl-kustomization.yaml" > "$dst/kustomization.yaml"

determine_velero_pvc_size

velero_binary
velero_binary
velero_install "$src" "$dst"
velero_patch_node_agent_privilege "$src" "$dst"
velero_patch_args "$src" "$dst"
Expand Down Expand Up @@ -225,7 +226,7 @@ function velero_already_applied() {

# The --secret-file flag should be used so that the generated velero deployment uses the
# cloud-credentials secret. Use the contents of that secret if it exists to avoid overwriting
# any changes.
# any changes.
function velero_credentials() {
if kubernetes_resource_exists "$VELERO_NAMESPACE" secret cloud-credentials; then
kubectl -n velero get secret cloud-credentials -ojsonpath='{ .data.cloud }' | base64 -d > velero-credentials
Expand Down Expand Up @@ -334,7 +335,7 @@ function velero_patch_http_proxy() {
fi
}

# If this cluster is used to restore a snapshot taken on a cluster where Rook or OpenEBS was the
# If this cluster is used to restore a snapshot taken on a cluster where Rook or OpenEBS was the
# default storage provisioner, the storageClassName on PVCs will need to be changed from "default"
# to "longhorn" by velero
# https://velero.io/docs/v1.6/restore-reference/#changing-pvpvc-storage-classes
Expand Down Expand Up @@ -362,18 +363,18 @@ EOF

function velero_should_migrate_from_object_store() {
# If KOTSADM_DISABLE_S3 is set, force the migration
if [ "$KOTSADM_DISABLE_S3" != 1 ]; then
if [ "$KOTSADM_DISABLE_S3" != 1 ]; then
return 1
fi

# if the PVC already exists, we've already migrated
if kubernetes_resource_exists "${VELERO_NAMESPACE}" pvc velero-internal-snapshots; then
if kubernetes_resource_exists "${VELERO_NAMESPACE}" pvc velero-internal-snapshots; then
return 1
fi

# if an object store isn't installed don't migrate
# TODO (dans): this doesn't support minio in a non-standard namespace
if (! kubernetes_resource_exists rook-ceph deployment rook-ceph-rgw-rook-ceph-store-a) && (! kubernetes_resource_exists minio deployment minio); then
if (! kubernetes_resource_exists rook-ceph deployment rook-ceph-rgw-rook-ceph-store-a) && (! kubernetes_resource_exists minio deployment minio); then
return 1
fi

Expand All @@ -386,7 +387,7 @@ function velero_should_migrate_from_object_store() {
}

function velero_did_migrate_from_object_store() {

# If KOTSADM_DISABLE_S3 is set, force the migration
if [ -f "$DIR/kustomize/velero/kustomization.yaml" ] && cat "$DIR/kustomize/velero/kustomization.yaml" | grep -q "s3-migration-deployment-patch.yaml"; then
return 0
Expand All @@ -401,12 +402,12 @@ function velero_migrate_from_object_store() {
export VELERO_S3_HOST=
export VELERO_S3_ACCESS_KEY_ID=
export VELERO_S3_ACCESS_KEY_SECRET=
if kubernetes_resource_exists rook-ceph deployment rook-ceph-rgw-rook-ceph-store-a; then
if kubernetes_resource_exists rook-ceph deployment rook-ceph-rgw-rook-ceph-store-a; then
echo "Previous installation of Rook Ceph detected."
VELERO_S3_HOST="rook-ceph-rgw-rook-ceph-store.rook-ceph"
VELERO_S3_ACCESS_KEY_ID=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep AccessKey | head -1 | awk '{print $2}' | base64 --decode)
VELERO_S3_ACCESS_KEY_SECRET=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep SecretKey | head -1 | awk '{print $2}' | base64 --decode)
else
else
echo "Previous installation of Minio detected."
VELERO_S3_HOST="minio.minio"
VELERO_S3_ACCESS_KEY_ID=$(kubectl -n minio get secret minio-credentials -ojsonpath='{ .data.MINIO_ACCESS_KEY }' | base64 --decode)
Expand All @@ -432,8 +433,8 @@ function velero_migrate_from_object_store() {
insert_resources "$dst/kustomization.yaml" s3-migration-bsl.yaml
}

# add patches for the velero and node-agent to the current kustomization file that setup the PVC setup like the
# velero LVP plugin requires
# add patches for the velero and node-agent to the current kustomization file that setup the PVC setup like the
# velero LVP plugin requires
function velero_patch_internal_pvc_snapshots() {
local src="$1"
local dst="$2"
Expand Down
2 changes: 1 addition & 1 deletion addons/velero/template/testgrid/k8s-docker.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
airgap: true
installerSpec:
kubernetes:
version: "latest"
version: "1.27.x"
flannel:
version: latest
rook:
Expand Down

0 comments on commit 4676715

Please sign in to comment.