From 2614940dd4173bc670a9c9751625be52326d1851 Mon Sep 17 00:00:00 2001
From: Jian Qiu
Date: Fri, 6 Sep 2024 11:24:54 +0800
Subject: [PATCH] local-cluster webhook (#777)

1. Can only create one local-cluster
2. Cannot update a local-cluster to a non local-cluster

Signed-off-by: Jian Qiu
---
 pkg/webhook/validating/webhook.go      |  67 ++++++++++
 pkg/webhook/validating/webhook_test.go | 176 +++++++++++++++++++++++++
 test/e2e/webhook_test.go               |  31 +++++
 3 files changed, 274 insertions(+)
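Note (not part of the diff below): a minimal sketch of the two rules the new validators enforce,
reusing the localcluster helper that webhook.go imports in this patch. It assumes clusterv1 refers
to open-cluster-management.io/api/cluster/v1, as used elsewhere in webhook.go; the package name and
the functions canCreateLocalCluster / canUpdateLocalClusterLabel are illustrative only.

    package sketch

    import (
    	"github.com/stolostron/cluster-lifecycle-api/helpers/localcluster"
    	clusterv1 "open-cluster-management.io/api/cluster/v1" // assumed import path for clusterv1
    )

    // canCreateLocalCluster reports whether creating newCluster keeps the
    // "at most one local-cluster per hub" rule checked by validateLocalClusterCreate.
    func canCreateLocalCluster(existing []*clusterv1.ManagedCluster, newCluster *clusterv1.ManagedCluster) bool {
    	if !localcluster.IsClusterSelfManaged(newCluster) {
    		return true // the rule only restricts clusters marked as the local cluster
    	}
    	for _, cluster := range existing {
    		if localcluster.IsClusterSelfManaged(cluster) {
    			return false // a local cluster already exists on the hub
    		}
    	}
    	return true
    }

    // canUpdateLocalClusterLabel reports whether an update keeps the local-cluster
    // marker unchanged, mirroring validateLocalClusterUpdate.
    func canUpdateLocalClusterLabel(oldCluster, newCluster *clusterv1.ManagedCluster) bool {
    	return localcluster.IsClusterSelfManaged(oldCluster) == localcluster.IsClusterSelfManaged(newCluster)
    }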
diff --git a/pkg/webhook/validating/webhook.go b/pkg/webhook/validating/webhook.go
index f02bfefa7..293d75609 100644
--- a/pkg/webhook/validating/webhook.go
+++ b/pkg/webhook/validating/webhook.go
@@ -9,6 +9,7 @@ import (
 
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
 	apiconstants "github.com/stolostron/cluster-lifecycle-api/constants"
+	"github.com/stolostron/cluster-lifecycle-api/helpers/localcluster"
 	v1 "k8s.io/api/admission/v1"
 	authenticationv1 "k8s.io/api/authentication/v1"
 	authorizationv1 "k8s.io/api/authorization/v1"
@@ -163,8 +164,16 @@ func (a *AdmissionHandler) validateClusterPool(request *v1.AdmissionRequest) *v1
 func (a *AdmissionHandler) validateManagedCluster(request *v1.AdmissionRequest) *v1.AdmissionResponse {
 	switch request.Operation {
 	case v1.Create:
+		resp := a.validateLocalClusterCreate(request)
+		if !resp.Allowed {
+			return resp
+		}
 		return a.validateClusterSetJoinPermission(request)
 	case v1.Update:
+		resp := a.validateLocalClusterUpdate(request)
+		if !resp.Allowed {
+			return resp
+		}
 		updateClusterSet, response, oldClusterSet, newClusterSet := a.validateUpdateClusterSet(request)
 		if !response.Allowed || !updateClusterSet {
 			return response
@@ -378,6 +387,64 @@ func (a *AdmissionHandler) responseNotAllowed(msg string) *v1.AdmissionResponse
 	}
 }
 
+// validateLocalClusterCreate checks if the cluster can be created as the local cluster. Only one cluster in
+// the hub can be created as the local cluster.
+func (a *AdmissionHandler) validateLocalClusterCreate(request *v1.AdmissionRequest) *v1.AdmissionResponse {
+	newCluster := &clusterv1.ManagedCluster{}
+	err := json.Unmarshal(request.Object.Raw, newCluster)
+	if err != nil {
+		return a.responseNotAllowed(err.Error())
+	}
+
+	if localcluster.IsClusterSelfManaged(newCluster) {
+		clusters, err := a.ClusterLister.List(labels.Everything())
+		if err != nil {
+			return a.responseNotAllowed(err.Error())
+		}
+		for _, cluster := range clusters {
+			if localcluster.IsClusterSelfManaged(cluster) {
+				return &v1.AdmissionResponse{
+					Allowed: false,
+					Result: &metav1.Status{
+						Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+						Message: fmt.Sprintf("cluster %s is already the local cluster, cannot create another local cluster", cluster.Name),
+					},
+				}
+			}
+		}
+	}
+
+	return a.responseAllowed()
+}
+
+func (a *AdmissionHandler) validateLocalClusterUpdate(request *v1.AdmissionRequest) *v1.AdmissionResponse {
+	oldCluster := &clusterv1.ManagedCluster{}
+	newCluster := &clusterv1.ManagedCluster{}
+
+	err := json.Unmarshal(request.Object.Raw, newCluster)
+	if err != nil {
+		return a.responseNotAllowed(err.Error())
+	}
+
+	err = json.Unmarshal(request.OldObject.Raw, oldCluster)
+	if err != nil {
+		return a.responseNotAllowed(err.Error())
+	}
+
+	if (localcluster.IsClusterSelfManaged(newCluster) && !localcluster.IsClusterSelfManaged(oldCluster)) ||
+		(!localcluster.IsClusterSelfManaged(newCluster) && localcluster.IsClusterSelfManaged(oldCluster)) {
+		return &v1.AdmissionResponse{
+			Allowed: false,
+			Result: &metav1.Status{
+				Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+				Message: fmt.Sprintf("cluster %s is not allowed to update the local-cluster label", newCluster.Name),
+			},
+		}
+	}
+
+	return a.responseAllowed()
+}
+
 func hasOwnerRef(ownerRefs []metav1.OwnerReference, wantRef metav1.OwnerReference) bool {
 	if len(ownerRefs) <= 0 {
 		return false
diff --git a/pkg/webhook/validating/webhook_test.go b/pkg/webhook/validating/webhook_test.go
index 1f1fb6f90..ffe50d176 100644
--- a/pkg/webhook/validating/webhook_test.go
+++ b/pkg/webhook/validating/webhook_test.go
@@ -1,6 +1,7 @@
 package validating
 
 import (
+	"fmt"
 	"reflect"
 	"testing"
 	"time"
@@ -858,3 +859,178 @@ func TestDeleteClusterDeployment(t *testing.T) {
 		})
 	}
 }
+
+const (
+	localClusterFmt   = `{"apiVersion":"cluster.open-cluster-management.io/v1","kind":"ManagedCluster","metadata":{"labels":{"local-cluster":"true"},"name":"%s"},"spec":{}}`
+	noLocalClusterFmt = `{"apiVersion":"cluster.open-cluster-management.io/v1","kind":"ManagedCluster","metadata":{"name":"%s"},"spec":{}}`
+)
+
+func TestLocalCluster(t *testing.T) {
+
+	cases := []struct {
+		name                    string
+		request                 *v1.AdmissionRequest
+		existingManagedClusters []runtime.Object
+		expectedResponse        *v1.AdmissionResponse
+	}{
+		{
+			name: "local cluster already exists",
+			existingManagedClusters: []runtime.Object{
+				&clusterv1.ManagedCluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "c0",
+						Labels: map[string]string{
+							"local-cluster": "true",
+						},
+					},
+				},
+			},
+			expectedResponse: &v1.AdmissionResponse{
+				Allowed: false,
+			},
+			request: &v1.AdmissionRequest{
+				Resource:  managedClustersGVR,
+				Operation: v1.Create,
+				Name:      "c1",
+				Object: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(localClusterFmt, "c1")),
+				},
+			},
+		},
+		{
+			name: "local cluster does not exist",
+			existingManagedClusters: []runtime.Object{
+				&clusterv1.ManagedCluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "c0",
+					},
+				},
+			},
+			expectedResponse: &v1.AdmissionResponse{
+				Allowed: true,
+			},
+			request: &v1.AdmissionRequest{
+				Resource:  managedClustersGVR,
+				Operation: v1.Create,
+				Name:      "c1",
+				Object: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(localClusterFmt, "c1")),
+				},
+			},
+		},
+		{
+			name: "create non local cluster when local cluster already exists",
+			existingManagedClusters: []runtime.Object{
+				&clusterv1.ManagedCluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "c0",
+						Labels: map[string]string{
+							"local-cluster": "true",
+						},
+					},
+				},
+			},
+			expectedResponse: &v1.AdmissionResponse{
+				Allowed: true,
+			},
+			request: &v1.AdmissionRequest{
+				Resource:  managedClustersGVR,
+				Operation: v1.Create,
+				Name:      "c1",
+				Object: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(noLocalClusterFmt, "c1")),
+				},
+			},
+		},
+		{
+			name:                    "update local cluster to non local cluster",
+			existingManagedClusters: []runtime.Object{},
+			expectedResponse: &v1.AdmissionResponse{
+				Allowed: false,
+			},
+			request: &v1.AdmissionRequest{
+				Resource:  managedClustersGVR,
+				Operation: v1.Update,
+				Name:      "c0",
+				OldObject: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(localClusterFmt, "c0")),
+				},
+				Object: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(noLocalClusterFmt, "c0")),
+				},
+			},
+		},
+		{
+			name:                    "update non local cluster to local cluster",
+			existingManagedClusters: []runtime.Object{},
+			expectedResponse: &v1.AdmissionResponse{
+				Allowed: false,
+			},
+			request: &v1.AdmissionRequest{
+				Resource:  managedClustersGVR,
+				Operation: v1.Update,
+				Name:      "c0",
+				OldObject: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(noLocalClusterFmt, "c0")),
+				},
+				Object: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(localClusterFmt, "c0")),
+				},
+			},
+		},
+		{
+			name:                    "update local cluster",
+			existingManagedClusters: []runtime.Object{},
+			expectedResponse: &v1.AdmissionResponse{
+				Allowed: true,
+			},
+			request: &v1.AdmissionRequest{
+				Resource:  managedClustersGVR,
+				Operation: v1.Update,
+				Name:      "c0",
+				OldObject: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(localClusterFmt, "c0")),
+				},
+				Object: runtime.RawExtension{
+					Raw: []byte(fmt.Sprintf(localClusterFmt, "c0")),
+				},
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			clusterClient := clusterfake.NewSimpleClientset(c.existingManagedClusters...)
+			clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, 10*time.Minute)
+			clusterStore := clusterInformerFactory.Cluster().V1().ManagedClusters().Informer().GetStore()
+			for _, cluster := range c.existingManagedClusters {
+				if err := clusterStore.Add(cluster); err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			kubeClient := kubefake.NewClientset()
+			kubeClient.PrependReactor(
+				"create",
+				"subjectaccessreviews",
+				func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, &authorizationv1.SubjectAccessReview{
+						Status: authorizationv1.SubjectAccessReviewStatus{
+							Allowed: true,
+						},
+					}, nil
+				},
+			)
+
+			admissionHandler := &AdmissionHandler{
+				KubeClient:    kubeClient,
+				ClusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
+			}
+
+			actualResponse := admissionHandler.ValidateResource(c.request)
+			if !reflect.DeepEqual(actualResponse.Allowed, c.expectedResponse.Allowed) {
+				t.Errorf("case: %v, expected %#v but got: %#v", c.name, c.expectedResponse, actualResponse)
+			}
+		})
+	}
+}
diff --git a/test/e2e/webhook_test.go b/test/e2e/webhook_test.go
index dc461ecf8..bbd683ca7 100644
--- a/test/e2e/webhook_test.go
+++ b/test/e2e/webhook_test.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/onsi/ginkgo/v2"
@@ -594,3 +595,33 @@ var _ = ginkgo.Describe("Testing managed cluster deletion", func() {
 	})
 })
+
+var _ = ginkgo.Describe("Testing local cluster", func() {
+	var userName = rand.String(6)
+	var clusterName = "e2e-" + userName
+
+	ginkgo.It("Can create only one local cluster and cannot change the local-cluster label", func() {
+		cluster := util.NewManagedCluster(clusterName)
+		cluster.Labels = map[string]string{"local-cluster": "true"}
+		ginkgo.By(fmt.Sprintf("create a managedCluster %s as the local cluster", clusterName))
+		err := util.CreateManagedCluster(clusterClient, cluster)
+		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+
+		ginkgo.By("cannot create another local cluster")
+		clusterLocalAnother := cluster.DeepCopy()
+		clusterLocalAnother.Name = clusterName + "-1"
+		err = util.CreateManagedCluster(clusterClient, clusterLocalAnother)
+		gomega.Expect(err).Should(gomega.HaveOccurred())
+
+		ginkgo.By("cannot change the local-cluster label of the local cluster")
+		patch := `{"metadata":{"labels":{"local-cluster":"false"}}}`
+		_, err = clusterClient.ClusterV1().ManagedClusters().Patch(
+			context.Background(), clusterName, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
+		gomega.Expect(err).Should(gomega.HaveOccurred())
+
+		ginkgo.By(fmt.Sprintf("The cluster %s can be deleted now", clusterName))
+		err = clusterClient.ClusterV1().ManagedClusters().Delete(
+			context.TODO(), clusterName, metav1.DeleteOptions{})
+		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+	})
+})