From 525d86859065a34190045e206759c453d3f5e4cd Mon Sep 17 00:00:00 2001
From: Swanand Shende
Date: Mon, 18 Nov 2024 15:13:10 +0530
Subject: [PATCH] Fix: Update namespace restoration logic to respect
 existingResourcePolicy

- Updated tag from v1.14.0.6-1 to v1.14.0.6-2
- This fix addresses an issue where namespace labels and annotations were
  not being updated during a restore, even when overwrite behavior was
  expected.
- Updated namespace restore logic to handle `ExistingResourcePolicyTypeUpdate`
  using the `processUpdateResourcePolicy` function.
- Added fallback logic to manually update labels and annotations if the
  patch operation fails.
- Ensured that namespace updates only occur when the
  `--existing-resource-policy` flag is set to `update`.
- Cluster-scoped resources like Namespace are included even when specific
  namespaces are selected.
- Update image tag name from v1.14.0.6-1 to v1.14.0.6-2.

Fixes KUBEDR-6210

(cherry picked from commit ...)
---
 Makefile               |  2 +-
 pkg/restore/restore.go | 86 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 86 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index c70c9fd2bf..cfd8a4ff62 100644
--- a/Makefile
+++ b/Makefile
@@ -60,7 +60,7 @@ HUGO_IMAGE := hugo-builder
 
 local : ARCH ?= $(shell go env GOOS)-$(shell go env GOARCH)
 ARCH ?= linux-amd64
 
-VERSION ?= v1.14.0.6-1
+VERSION ?= v1.14.0.6-2
 
 TAG_LATEST ?= false

diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go
index 88d8c389f3..42a75955f9 100644
--- a/pkg/restore/restore.go
+++ b/pkg/restore/restore.go
@@ -741,6 +741,88 @@ func (ctx *restoreContext) processSelectedResource(
 	}
 
 	// For namespaces resources we don't need to following steps
 	if groupResource == kuberesource.Namespaces {
+		if existingNamespaces.Has(targetNS) {
+			// Check if the existing resource policy is set to 'update'
+			if len(ctx.restore.Spec.ExistingResourcePolicy) == 0 || ctx.restore.Spec.ExistingResourcePolicy != velerov1api.PolicyTypeUpdate {
+				ctx.log.Infof("Skipping update for existing namespace %s because existing resource policy is not 'update'", targetNS)
+				continue
+			}
+
+			// Fetch the current namespace from the cluster
+			existingNS, err := ctx.namespaceClient.Get(go_context.TODO(), targetNS, metav1.GetOptions{})
+			if err != nil {
+				errs.AddVeleroError(errors.Wrap(err, "fetching existing namespace"))
+				continue
+			}
+
+			// Retrieve the backup namespace definition
+			backupNS := getNamespace(
+				ctx.log.WithField("namespace", namespace),
+				archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", namespace),
+				targetNS,
+			)
+
+			// Convert both namespaces to unstructured for patching
+			existingNSUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(existingNS)
+			if err != nil {
+				errs.AddVeleroError(errors.Wrap(err, "converting existing namespace to unstructured"))
+				continue
+			}
+			backupNSUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(backupNS)
+			if err != nil {
+				errs.AddVeleroError(errors.Wrap(err, "converting backup namespace to unstructured"))
+				continue
+			}
+
+			// Construct the GroupResource for namespaces
+			namespaceGR := schema.GroupResource{Group: "", Resource: "namespaces"}
+
+			// Use getResourceClient to obtain a dynamic client for the namespace resource
+			resourceClient, err := ctx.getResourceClient(namespaceGR, &unstructured.Unstructured{Object: backupNSUnstructured}, "")
+			if err != nil {
+				errs.AddVeleroError(errors.Wrap(err, "getting dynamic client for Namespace resource"))
+				continue
+			}
+
+			// Process the update policy using the existing function
+			warningsFromUpdateRP, errsFromUpdateRP := ctx.processUpdateResourcePolicy(
+				&unstructured.Unstructured{Object: existingNSUnstructured},
+				&unstructured.Unstructured{Object: existingNSUnstructured}, // Pass existingNS with restore labels for the second parameter
+				&unstructured.Unstructured{Object: backupNSUnstructured},
+				targetNS,
+				resourceClient,
+			)
+
+			// Fall back to manual label/annotation update if the patch fails
+			if !errsFromUpdateRP.IsEmpty() {
+				ctx.log.Warnf("Patch failed for namespace %s, falling back to manual label/annotation update", targetNS)
+
+				// Ensure existingNS.Labels and Annotations are not nil
+				if existingNS.Labels == nil {
+					existingNS.Labels = make(map[string]string)
+				}
+				if existingNS.Annotations == nil {
+					existingNS.Annotations = make(map[string]string)
+				}
+
+				// Merge labels and annotations
+				for k, v := range backupNS.Labels {
+					existingNS.Labels[k] = v
+				}
+				for k, v := range backupNS.Annotations {
+					existingNS.Annotations[k] = v
+				}
+
+				// Apply the updated namespace
+				_, err = ctx.namespaceClient.Update(go_context.TODO(), existingNS, metav1.UpdateOptions{})
+				if err != nil {
+					errs.AddVeleroError(errors.Wrap(err, "updating namespace manually"))
+				}
+			}
+
+			warnings.Merge(&warningsFromUpdateRP)
+			errs.Merge(&errsFromUpdateRP)
+		}
 		continue
 	}
 
@@ -2283,7 +2365,9 @@ func (ctx *restoreContext) getOrderedResourceCollection(
 			continue
 		}
 
-		if namespace == "" && !boolptr.IsSetToTrue(ctx.restore.Spec.IncludeClusterResources) && !ctx.namespaceIncludesExcludes.IncludeEverything() {
+		if groupResource.Resource == "namespaces" {
+			ctx.log.Infof("Including resource namespaces despite being cluster-scoped")
+		} else if namespace == "" && !boolptr.IsSetToTrue(ctx.restore.Spec.IncludeClusterResources) && !ctx.namespaceIncludesExcludes.IncludeEverything() {
 			ctx.log.Infof("Skipping resource %s because it's cluster-scoped and only specific namespaces are included in the restore", resource)
 			continue
 		}