diff --git a/tests/v2/actions/workloads/pods/pods.go b/tests/v2/actions/workloads/pods/pods.go index 6d90945087f..953a816c498 100644 --- a/tests/v2/actions/workloads/pods/pods.go +++ b/tests/v2/actions/workloads/pods/pods.go @@ -3,6 +3,7 @@ package pods import ( "context" "errors" + "fmt" "regexp" "strings" "time" @@ -207,3 +208,63 @@ func CountPodContainerRunningByImage(client *rancher.Client, clusterID, namespac } return count, nil } + +// GetPodByName is a helper to retrieve Pod information by Pod name +func GetPodByName(client *rancher.Client, clusterID, namespaceName, podName string) (*corev1.Pod, error) { + downstreamContext, err := client.WranglerContext.DownStreamClusterWranglerContext(clusterID) + if err != nil { + return nil, err + } + + updatedPodList, err := downstreamContext.Core.Pod().List(namespaceName, metav1.ListOptions{ + FieldSelector: "metadata.name=" + podName, + }) + if err != nil { + return nil, err + } + + if len(updatedPodList.Items) == 0 { + return nil, fmt.Errorf("pod %s not found", podName) + } + updatedPod := updatedPodList.Items[0] + + return &updatedPod, nil +} + +// GetPodNamesFromDeployment is a helper to get names of the pod in a deployment +func GetPodNamesFromDeployment(client *rancher.Client, clusterID, namespaceName string, deploymentName string) ([]string, error) { + deploymentList, err := deployments.ListDeployments(client, clusterID, namespaceName, metav1.ListOptions{ + FieldSelector: "metadata.name=" + deploymentName, + }) + if err != nil { + return nil, err + } + + if len(deploymentList.Items) == 0 { + return nil, fmt.Errorf("deployment %s not found", deploymentName) + } + deployment := deploymentList.Items[0] + selector := deployment.Spec.Selector + labelSelector, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return nil, err + } + + var podNames []string + downstreamContext, err := client.WranglerContext.DownStreamClusterWranglerContext(clusterID) + if err != nil { + return nil, err + } + 
pods, err := downstreamContext.Core.Pod().List(namespaceName, metav1.ListOptions{ + LabelSelector: labelSelector.String(), + }) + if err != nil { + return nil, err + } + + for _, pod := range pods.Items { + podNames = append(podNames, pod.Name) + } + + return podNames, nil +} diff --git a/tests/v2/validation/projects/projects.go b/tests/v2/validation/projects/projects.go index bb3a83d9d3e..f30b20d4d9b 100644 --- a/tests/v2/validation/projects/projects.go +++ b/tests/v2/validation/projects/projects.go @@ -1,25 +1,37 @@ package projects import ( + "encoding/json" "errors" "fmt" + "strconv" "strings" + "time" v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/rancher/tests/v2/actions/kubeapi/namespaces" "github.com/rancher/rancher/tests/v2/actions/kubeapi/projects" rbacapi "github.com/rancher/rancher/tests/v2/actions/kubeapi/rbac" + quotas "github.com/rancher/rancher/tests/v2/actions/kubeapi/resourcequotas" + "github.com/rancher/rancher/tests/v2/actions/kubeapi/workloads/deployments" + "github.com/rancher/rancher/tests/v2/actions/workloads" + pod "github.com/rancher/rancher/tests/v2/actions/workloads/pods" "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + v1 "github.com/rancher/shepherd/clients/rancher/v1" namegen "github.com/rancher/shepherd/pkg/namegenerator" + "github.com/rancher/shepherd/pkg/wrangler" + appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( - dummyFinalizer = "example.com/dummy" - systemProjectLabel = "authz.management.cattle.io/system-project" - resourceQuotaAnnotation = "field.cattle.io/resourceQuota" + dummyFinalizer = "dummy" + systemProjectLabel = "authz.management.cattle.io/system-project" + resourceQuotaAnnotation = "field.cattle.io/resourceQuota" + containerDefaultLimitAnnotation = "field.cattle.io/containerDefaultResourceLimit" + resourceQuotaStatusAnnotation = 
"cattle.io/status" ) var prtb = v3.ProjectRoleTemplateBinding{ @@ -47,13 +59,56 @@ func createProjectAndNamespace(client *rancher.Client, clusterID string, project return createdProject, createdNamespace, nil } +func createProjectAndNamespaceWithQuotas(client *rancher.Client, clusterID string, namespacePodLimit, projectPodLimit string) (*v3.Project, *corev1.Namespace, error) { + projectTemplate := projects.NewProjectTemplate(clusterID) + projectTemplate.Spec.NamespaceDefaultResourceQuota.Limit.Pods = namespacePodLimit + projectTemplate.Spec.ResourceQuota.Limit.Pods = projectPodLimit + createdProject, createdNamespace, err := createProjectAndNamespace(client, clusterID, projectTemplate) + if err != nil { + return nil, nil, fmt.Errorf("failed to create project and namespace: %v", err) + } + + return createdProject, createdNamespace, nil +} + +func createProjectAndNamespaceWithLimits(client *rancher.Client, clusterID string, cpuLimit, cpuReservation, memoryLimit, memoryReservation string) (*v3.Project, *corev1.Namespace, error) { + projectTemplate := projects.NewProjectTemplate(clusterID) + projectTemplate.Spec.ContainerDefaultResourceLimit.LimitsCPU = cpuLimit + projectTemplate.Spec.ContainerDefaultResourceLimit.RequestsCPU = cpuReservation + projectTemplate.Spec.ContainerDefaultResourceLimit.LimitsMemory = memoryLimit + projectTemplate.Spec.ContainerDefaultResourceLimit.RequestsMemory = memoryReservation + + createdProject, createdNamespace, err := createProjectAndNamespace(client, clusterID, projectTemplate) + if err != nil { + return nil, nil, fmt.Errorf("failed to create project and namespace: %v", err) + } + + return createdProject, createdNamespace, nil +} + +func createProjectRoleTemplateBinding(client *rancher.Client, user *management.User, project *v3.Project, projectRole string) (*v3.ProjectRoleTemplateBinding, error) { + projectName := fmt.Sprintf("%s:%s", project.Namespace, project.Name) + prtb.Name = namegen.AppendRandomString("prtb-") + prtb.Namespace = 
project.Name + prtb.ProjectName = projectName + prtb.RoleTemplateName = projectRole + prtb.UserPrincipalName = user.PrincipalIDs[0] + + createdProjectRoleTemplateBinding, err := rbacapi.CreateProjectRoleTemplateBinding(client, &prtb) + if err != nil { + return nil, err + } + + return createdProjectRoleTemplateBinding, nil +} + func checkAnnotationExistsInNamespace(client *rancher.Client, clusterID string, namespaceName string, annotationKey string, expectedExistence bool) error { - updatedNamespace, err := namespaces.GetNamespaceByName(client, clusterID, namespaceName) + namespace, err := namespaces.GetNamespaceByName(client, clusterID, namespaceName) if err != nil { return err } - _, exists := updatedNamespace.Annotations[annotationKey] + _, exists := namespace.Annotations[annotationKey] if (expectedExistence && !exists) || (!expectedExistence && exists) { errorMessage := fmt.Sprintf("Annotation '%s' should%s exist", annotationKey, map[bool]string{true: "", false: " not"}[expectedExistence]) return errors.New(errorMessage) @@ -95,18 +150,304 @@ func checkNamespaceLabelsAndAnnotations(clusterID string, projectName string, na return nil } -func createProjectRoleTemplateBinding(client *rancher.Client, user *management.User, project *v3.Project, projectRole string) (*v3.ProjectRoleTemplateBinding, error) { - projectName := fmt.Sprintf("%s:%s", project.Namespace, project.Name) - prtb.Name = namegen.AppendRandomString("prtb-") - prtb.Namespace = project.Name - prtb.ProjectName = projectName - prtb.RoleTemplateName = projectRole - prtb.UserPrincipalName = user.PrincipalIDs[0] +func getLatestStatusMessageFromDeployment(deployment *appv1.Deployment, messageType string) (string, string, error) { + latestTime := time.Time{} + latestMessage := "" + latestReason := "" - createdProjectRoleTemplateBinding, err := rbacapi.CreateProjectRoleTemplateBinding(client, &prtb) + targetMessageType := appv1.DeploymentConditionType(messageType) + + for _, condition := range 
deployment.Status.Conditions { + if condition.Type == targetMessageType && condition.LastUpdateTime.After(latestTime) { + latestMessage = condition.Message + latestReason = condition.Reason + latestTime = condition.LastUpdateTime.Time + } + } + + return latestMessage, latestReason, nil +} + +func checkDeploymentStatus(client *rancher.Client, clusterID, namespaceName, deploymentName, statusType, expectedStatusReason, expectedStatusMessage string, expectedReplicaCount int32) error { + updatedDeploymentList, err := deployments.ListDeployments(client, clusterID, namespaceName, metav1.ListOptions{ + FieldSelector: "metadata.name=" + deploymentName, + }) + if err != nil { + return err + } + + if len(updatedDeploymentList.Items) == 0 { + return fmt.Errorf("deployment %s not found", deploymentName) + } + + updatedDeployment := updatedDeploymentList.Items[0] + + statusMsg, statusReason, err := getLatestStatusMessageFromDeployment(&updatedDeployment, statusType) + if err != nil { + return err + } + + if !strings.Contains(statusMsg, expectedStatusMessage) { + return fmt.Errorf("expected status message: %s, actual status message: %s", expectedStatusMessage, statusMsg) + } + + if !strings.Contains(statusReason, expectedStatusReason) { + return fmt.Errorf("expected status reason: %s, actual status reason: %s", expectedStatusReason, statusReason) + } + + if updatedDeployment.Status.ReadyReplicas != expectedReplicaCount { + return fmt.Errorf("unexpected number of ready replicas: expected %d, got %d", expectedReplicaCount, updatedDeployment.Status.ReadyReplicas) + } + + return nil +} + +func getStatusAndMessageFromAnnotation(annotation string, conditionType string) (string, string, error) { + var annotationData map[string][]map[string]string + if err := json.Unmarshal([]byte(annotation), &annotationData); err != nil { + return "", "", fmt.Errorf("error parsing JSON: %v", err) + } + + conditions, ok := annotationData["Conditions"] + if !ok { + return "", "", fmt.Errorf("no 
'Conditions' found in annotation") + } + + for _, condition := range conditions { + if condition["Type"] == conditionType { + status := condition["Status"] + message := condition["Message"] + + return status, message, nil + } + } + + return "", "", fmt.Errorf("no condition of type '%s' found", conditionType) +} + +func getNamespaceLimit(client *rancher.Client, clusterID string, namespaceName, annotation string) (map[string]interface{}, error) { + namespace, err := namespaces.GetNamespaceByName(client, clusterID, namespaceName) if err != nil { return nil, err } - return createdProjectRoleTemplateBinding, nil + limitAnnotation := namespace.Annotations[annotation] + if limitAnnotation == "" { + return nil, errors.New("annotation not found") + } + + var data map[string]interface{} + err = json.Unmarshal([]byte(limitAnnotation), &data) + if err != nil { + return nil, err + } + + return data, nil +} + +func checkNamespaceResourceQuota(client *rancher.Client, clusterID, namespaceName string, expectedPodLimit int) error { + quotas, err := quotas.ListResourceQuotas(client, clusterID, namespaceName, metav1.ListOptions{}) + if err != nil { + return err + } + if len(quotas.Items) != 1 { + return fmt.Errorf("expected resource quota count is 1, but got %d", len(quotas.Items)) + } + + resourceList := quotas.Items[0].Spec.Hard + actualPodLimit, ok := resourceList[corev1.ResourcePods] + if !ok { + return fmt.Errorf("pod limit not found in the resource quota") + } + podLimit := int(actualPodLimit.Value()) + if podLimit != expectedPodLimit { + return fmt.Errorf("pod limit in the resource quota: %d does not match the expected value: %d", podLimit, expectedPodLimit) + } + + return nil +} + +func checkNamespaceResourceQuotaValidationStatus(client *rancher.Client, clusterID, namespaceName, namespacePodLimit string, expectedStatus bool, expectedErrorMessage string) error { + namespace, err := namespaces.GetNamespaceByName(client, clusterID, namespaceName) + if err != nil { + return err + 
} + + limitData, err := getNamespaceLimit(client, clusterID, namespace.Name, resourceQuotaAnnotation) + if err != nil { + return err + } + actualNamespacePodLimit := limitData["limit"].(map[string]interface{})["pods"] + + if actualNamespacePodLimit != namespacePodLimit { + return fmt.Errorf("namespace pod limit mismatch in the namespace spec. expected: %s, actual: %s", namespacePodLimit, actualNamespacePodLimit) + } + + status, message, err := getStatusAndMessageFromAnnotation(namespace.Annotations[resourceQuotaStatusAnnotation], "ResourceQuotaValidated") + if err != nil { + return err + } + + if (status == "True") != expectedStatus { + return fmt.Errorf("resource quota validation status mismatch. expected: %t, actual: %s", expectedStatus, status) + } + + if !strings.Contains(message, expectedErrorMessage) { + return fmt.Errorf("error message does not contain expected substring: %s", expectedErrorMessage) + } + + return nil +} + +func getAndConvertDeployment(client *rancher.Client, clusterID string, deployment *appv1.Deployment) (*appv1.Deployment, error) { + steveClient, err := client.Steve.ProxyDownstream(clusterID) + if err != nil { + return nil, err + } + + deploymentID := deployment.Namespace + "/" + deployment.Name + deploymentResp, err := steveClient.SteveType(workloads.DeploymentSteveType).ByID(deploymentID) + if err != nil { + return nil, err + } + + deploymentObj := &appv1.Deployment{} + err = v1.ConvertToK8sType(deploymentResp.JSONResp, deploymentObj) + if err != nil { + return nil, err + } + return deploymentObj, nil +} + +func updateProjectContainerResourceLimit(client *rancher.Client, existingProject *v3.Project, cpuLimit, cpuReservation, memoryLimit, memoryReservation string) (*v3.Project, error) { + updatedProject := existingProject.DeepCopy() + updatedProject.Spec.ContainerDefaultResourceLimit.LimitsCPU = cpuLimit + updatedProject.Spec.ContainerDefaultResourceLimit.RequestsCPU = cpuReservation + 
updatedProject.Spec.ContainerDefaultResourceLimit.LimitsMemory = memoryLimit + updatedProject.Spec.ContainerDefaultResourceLimit.RequestsMemory = memoryReservation + + updatedProject, err := projects.UpdateProject(client, existingProject, updatedProject) + if err != nil { + return nil, err + } + + return updatedProject, nil +} + +func checkContainerResources(client *rancher.Client, clusterID, namespaceName, deploymentName, cpuLimit, cpuReservation, memoryLimit, memoryReservation string) error { + var errs []string + + podNames, err := pod.GetPodNamesFromDeployment(client, clusterID, namespaceName, deploymentName) + if err != nil { + return fmt.Errorf("error fetching pod by deployment name: %w", err) + } + if len(podNames) < 1 { + return errors.New("expected at least one pod, but got " + strconv.Itoa(len(podNames))) + } + + pod, err := pod.GetPodByName(client, clusterID, namespaceName, podNames[0]) + if err != nil { + return err + } + if len(pod.Spec.Containers) == 0 { + return errors.New("no containers found in the pod") + } + + normalizeString := func(s string) string { + if s == "" { + return "0" + } + return s + } + + cpuLimit = normalizeString(cpuLimit) + cpuReservation = normalizeString(cpuReservation) + memoryLimit = normalizeString(memoryLimit) + memoryReservation = normalizeString(memoryReservation) + + containerResources := pod.Spec.Containers[0].Resources + containerCPULimit := containerResources.Limits[corev1.ResourceCPU] + containerCPURequest := containerResources.Requests[corev1.ResourceCPU] + containerMemoryLimit := containerResources.Limits[corev1.ResourceMemory] + containerMemoryRequest := containerResources.Requests[corev1.ResourceMemory] + + if cpuLimit != containerCPULimit.String() { + errs = append(errs, "CPU limit mismatch") + } + if cpuReservation != containerCPURequest.String() { + errs = append(errs, "CPU reservation mismatch") + } + if memoryLimit != containerMemoryLimit.String() { + errs = append(errs, "Memory limit mismatch") + } + if 
memoryReservation != containerMemoryRequest.String() { + errs = append(errs, "Memory reservation mismatch") + } + + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + + return nil +} + +func checkLimitRange(client *rancher.Client, clusterID, namespaceName string, expectedCPULimit, expectedCPURequest, expectedMemoryLimit, expectedMemoryRequest string) error { + var ctx *wrangler.Context + var err error + + if clusterID != "local" { + ctx, err = client.WranglerContext.DownStreamClusterWranglerContext(clusterID) + if err != nil { + return fmt.Errorf("failed to get downstream context: %w", err) + } + } else { + ctx = client.WranglerContext + } + + limitRanges, err := ctx.Core.LimitRange().List(namespaceName, metav1.ListOptions{}) + if err != nil { + return err + } + if len(limitRanges.Items) != 1 { + return fmt.Errorf("expected limit range count is 1, but got %d", len(limitRanges.Items)) + } + limitRangeList := limitRanges.Items[0].Spec + + actualCPULimit, ok := limitRangeList.Limits[0].Default["cpu"] + if !ok { + return fmt.Errorf("cpu limit not found in the limit range") + } + cpuLimit := actualCPULimit.String() + if cpuLimit != expectedCPULimit { + return fmt.Errorf("cpu limit in the limit range: %s does not match the expected value: %s", cpuLimit, expectedCPULimit) + } + + actualMemoryLimit, ok := limitRangeList.Limits[0].Default["memory"] + if !ok { + return fmt.Errorf("memory limit not found in the limit range") + } + memoryLimit := actualMemoryLimit.String() + if memoryLimit != expectedMemoryLimit { + return fmt.Errorf("memory limit in the limit range: %s does not match the expected value: %s", memoryLimit, expectedMemoryLimit) + } + + actualCPURequest, ok := limitRangeList.Limits[0].DefaultRequest["cpu"] + if !ok { + return fmt.Errorf("cpu request not found in the limit range") + } + cpuRequest := actualCPURequest.String() + if cpuRequest != expectedCPURequest { + return fmt.Errorf("cpu request in the limit range: %s does not match the 
expected value: %s", cpuRequest, expectedCPURequest) + } + + actualMemoryRequest, ok := limitRangeList.Limits[0].DefaultRequest["memory"] + if !ok { + return fmt.Errorf("memory request not found in the limit range") + } + memoryRequest := actualMemoryRequest.String() + if memoryRequest != expectedMemoryRequest { + return fmt.Errorf("memory request in the limit range: %s does not match the expected value: %s", memoryRequest, expectedMemoryRequest) + } + + return nil } diff --git a/tests/v2/validation/projects/projects_container_default_resource_limit_test.go b/tests/v2/validation/projects/projects_container_default_resource_limit_test.go new file mode 100644 index 00000000000..27988eb49ae --- /dev/null +++ b/tests/v2/validation/projects/projects_container_default_resource_limit_test.go @@ -0,0 +1,345 @@ +//go:build (validation || infra.any || cluster.any || extended) && !sanity && !stress + +package projects + +import ( + "fmt" + "regexp" + "testing" + + "github.com/rancher/rancher/tests/v2/actions/kubeapi/namespaces" + "github.com/rancher/rancher/tests/v2/actions/kubeapi/projects" + "github.com/rancher/rancher/tests/v2/actions/rbac" + deployment "github.com/rancher/rancher/tests/v2/actions/workloads/deployment" + "github.com/rancher/shepherd/clients/rancher" + management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + "github.com/rancher/shepherd/extensions/charts" + "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/users" + "github.com/rancher/shepherd/pkg/session" + "github.com/rancher/shepherd/pkg/wrangler" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ProjectsContainerResourceLimitTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + cluster *management.Cluster +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TearDownSuite() { + 
pcrl.session.Cleanup() +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) SetupSuite() { + pcrl.session = session.NewSession() + + client, err := rancher.NewClient("", pcrl.session) + require.NoError(pcrl.T(), err) + + pcrl.client = client + + clusterName := client.RancherConfig.ClusterName + require.NotEmptyf(pcrl.T(), clusterName, "Cluster name to install should be set") + clusterID, err := clusters.GetClusterIDByName(pcrl.client, clusterName) + require.NoError(pcrl.T(), err, "Error getting cluster ID") + pcrl.cluster, err = pcrl.client.Management.Cluster.ByID(clusterID) + require.NoError(pcrl.T(), err) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) setupUserForProject() (*rancher.Client, *wrangler.Context) { + log.Info("Create a standard user and add the user to the downstream cluster as cluster owner.") + standardUser, err := users.CreateUserWithRole(pcrl.client, users.UserConfig(), projects.StandardUser) + require.NoError(pcrl.T(), err, "Failed to create standard user") + standardUserClient, err := pcrl.client.AsUser(standardUser) + require.NoError(pcrl.T(), err) + err = users.AddClusterRoleToUser(pcrl.client, pcrl.cluster, standardUser, rbac.ClusterOwner.String(), nil) + require.NoError(pcrl.T(), err, "Failed to add the user as a cluster owner to the downstream cluster") + + standardUserContext, err := standardUserClient.WranglerContext.DownStreamClusterWranglerContext(pcrl.cluster.ID) + require.NoError(pcrl.T(), err) + + return standardUserClient, standardUserContext +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestCpuAndMemoryLimitLessThanRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project in the downstream cluster with CPU and Memory request set greater than CPU and Memory limit. 
Verify that the webhook rejects the request.") + cpuLimit := "100m" + cpuReservation := "200m" + memoryLimit := "32Mi" + memoryReservation := "64Mi" + + _, _, err := createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.Error(pcrl.T(), err) + pattern := fmt.Sprintf(`admission webhook "rancher.cattle.io.projects.management.cattle.io" denied the request: project.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested CPU %s is greater than limit %s\nproject.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested memory %s is greater than limit %s`, cpuReservation, memoryReservation, cpuLimit, memoryLimit, cpuReservation, cpuLimit, cpuReservation, memoryReservation, cpuLimit, memoryLimit, memoryReservation, memoryLimit) + require.Regexp(pcrl.T(), regexp.MustCompile(pattern), err.Error()) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestCpuLimitLessThanRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project in the downstream cluster with CPU request set greater than the CPU limit. 
Verify that the webhook rejects the request.") + cpuLimit := "100m" + cpuReservation := "200m" + memoryLimit := "" + memoryReservation := "" + + _, _, err := createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.Error(pcrl.T(), err) + pattern := fmt.Sprintf(`admission webhook "rancher.cattle.io.projects.management.cattle.io" denied the request: project.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested CPU %s is greater than limit %s`, cpuReservation, memoryReservation, cpuLimit, memoryLimit, cpuReservation, cpuLimit) + require.Regexp(pcrl.T(), regexp.MustCompile(pattern), err.Error()) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestMemoryLimitLessThanRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project in the downstream cluster with Memory request set greater than the Memory limit. 
Verify that the webhook rejects the request.") + cpuLimit := "" + cpuReservation := "" + memoryLimit := "32Mi" + memoryReservation := "64Mi" + + _, _, err := createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.Error(pcrl.T(), err) + pattern := fmt.Sprintf(`admission webhook "rancher.cattle.io.projects.management.cattle.io" denied the request: project.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested memory %s is greater than limit %s`, cpuReservation, memoryReservation, cpuLimit, memoryLimit, memoryReservation, memoryLimit) + require.Regexp(pcrl.T(), regexp.MustCompile(pattern), err.Error()) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestValidCpuLimitButMemoryLimitLessThanRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project in the downstream cluster with CPU request set lower than the CPU limit but Memory request set greater than the Memory Request. 
Verify that the webhook rejects the request.") + cpuLimit := "200m" + cpuReservation := "100m" + memoryLimit := "32Mi" + memoryReservation := "64Mi" + + _, _, err := createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.Error(pcrl.T(), err) + pattern := fmt.Sprintf(`admission webhook "rancher.cattle.io.projects.management.cattle.io" denied the request: project.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested memory %s is greater than limit %s`, cpuReservation, memoryReservation, cpuLimit, memoryLimit, memoryReservation, memoryLimit) + require.Regexp(pcrl.T(), regexp.MustCompile(pattern), err.Error()) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestCpuAndMemoryLimitEqualToRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project (with CPU and Memory limit equal to Request) and a namespace in the project.") + cpuLimit := "200m" + cpuReservation := "200m" + memoryLimit := "64Mi" + memoryReservation := "64Mi" + + createdProject, createdNamespace, err := createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Verify that the container default resource limit in the Project spec is accurate.") + projectSpec := createdProject.Spec.ContainerDefaultResourceLimit + require.Equal(pcrl.T(), cpuLimit, projectSpec.LimitsCPU, "CPU limit mismatch") + require.Equal(pcrl.T(), cpuReservation, projectSpec.RequestsCPU, "CPU reservation mismatch") + require.Equal(pcrl.T(), memoryLimit, projectSpec.LimitsMemory, "Memory limit mismatch") + require.Equal(pcrl.T(), memoryReservation, projectSpec.RequestsMemory, "Memory reservation mismatch") + + 
log.Info("Verify that the namespace has the label and annotation referencing the project.") + updatedNamespace, err := namespaces.GetNamespaceByName(standardUserClient, pcrl.cluster.ID, createdNamespace.Name) + require.NoError(pcrl.T(), err) + err = checkNamespaceLabelsAndAnnotations(pcrl.cluster.ID, createdProject.Name, updatedNamespace) + require.NoError(pcrl.T(), err) + + log.Info("Verify that the limit range object is created for the namespace and the resource limit in the limit range is accurate.") + err = checkLimitRange(standardUserClient, pcrl.cluster.ID, updatedNamespace.Name, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Create a deployment in the namespace with one replica and verify that a pod is created.") + createdDeployment, err := deployment.CreateDeployment(standardUserClient, pcrl.cluster.ID, updatedNamespace.Name, 1, "", "", false, false) + require.NoError(pcrl.T(), err, "Failed to create deployment in the namespace") + err = charts.WatchAndWaitDeployments(standardUserClient, pcrl.cluster.ID, updatedNamespace.Name, metav1.ListOptions{ + FieldSelector: "metadata.name=" + createdDeployment.Name, + }) + require.NoError(pcrl.T(), err) + + log.Info("Verify that the resource limits and requests for the container in the pod spec is accurate.") + err = checkContainerResources(standardUserClient, pcrl.cluster.ID, updatedNamespace.Name, createdDeployment.Name, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestCpuAndMemoryLimitGreaterThanRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project (with CPU and Memory limit greater than Request) and a namespace in the project.") + cpuLimit := "200m" + cpuReservation := "100m" + memoryLimit := "64Mi" + memoryReservation := "32Mi" + + 
createdProject, createdNamespace, err := createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Verify that the container default resource limit in the Project spec is accurate.") + projectSpec := createdProject.Spec.ContainerDefaultResourceLimit + require.Equal(pcrl.T(), cpuLimit, projectSpec.LimitsCPU, "CPU limit mismatch") + require.Equal(pcrl.T(), cpuReservation, projectSpec.RequestsCPU, "CPU reservation mismatch") + require.Equal(pcrl.T(), memoryLimit, projectSpec.LimitsMemory, "Memory limit mismatch") + require.Equal(pcrl.T(), memoryReservation, projectSpec.RequestsMemory, "Memory reservation mismatch") + + log.Info("Verify that the namespace has the label and annotation referencing the project.") + updatedNamespace, err := namespaces.GetNamespaceByName(standardUserClient, pcrl.cluster.ID, createdNamespace.Name) + require.NoError(pcrl.T(), err) + err = checkNamespaceLabelsAndAnnotations(pcrl.cluster.ID, createdProject.Name, updatedNamespace) + require.NoError(pcrl.T(), err) + + log.Info("Verify that the limit range object is created for the namespace and the resource limit in the limit range is accurate.") + err = checkLimitRange(standardUserClient, pcrl.cluster.ID, updatedNamespace.Name, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Create a deployment in the namespace with one replica and verify that a pod is created.") + createdDeployment, err := deployment.CreateDeployment(standardUserClient, pcrl.cluster.ID, updatedNamespace.Name, 1, "", "", false, false) + require.NoError(pcrl.T(), err, "Failed to create deployment in the namespace") + err = charts.WatchAndWaitDeployments(standardUserClient, pcrl.cluster.ID, updatedNamespace.Name, metav1.ListOptions{ + FieldSelector: "metadata.name=" + createdDeployment.Name, + }) + require.NoError(pcrl.T(), err) + + log.Info("Verify that 
the resource limits and requests for the container in the pod spec is accurate.") + err = checkContainerResources(standardUserClient, pcrl.cluster.ID, updatedNamespace.Name, createdDeployment.Name, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestUpdateProjectWithCpuAndMemoryLimitLessThanRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project (with valid container default resource limit) and a namespace in the project.") + cpuLimit := "100m" + cpuReservation := "50m" + memoryLimit := "64Mi" + memoryReservation := "32Mi" + + createdProject, createdNamespace, err := createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Verify that the container default resource limit in the Project spec is accurate.") + projectSpec := createdProject.Spec.ContainerDefaultResourceLimit + require.Equal(pcrl.T(), cpuLimit, projectSpec.LimitsCPU, "CPU limit mismatch") + require.Equal(pcrl.T(), cpuReservation, projectSpec.RequestsCPU, "CPU reservation mismatch") + require.Equal(pcrl.T(), memoryLimit, projectSpec.LimitsMemory, "Memory limit mismatch") + require.Equal(pcrl.T(), memoryReservation, projectSpec.RequestsMemory, "Memory reservation mismatch") + + log.Info("Verify that the limit range object is created for the namespace and the resource limit in the limit range is accurate.") + err = checkLimitRange(standardUserClient, pcrl.cluster.ID, createdNamespace.Name, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Update the project with CPU and Memory request set greater than CPU and Memory limit. 
Verify that the webhook rejects the request.") + cpuLimit = "100m" + cpuReservation = "200m" + memoryLimit = "32Mi" + memoryReservation = "64Mi" + _, err = updateProjectContainerResourceLimit(standardUserClient, createdProject, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.Error(pcrl.T(), err) + pattern := fmt.Sprintf(`admission webhook "rancher.cattle.io.projects.management.cattle.io" denied the request: project.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested CPU %s is greater than limit %s\nproject.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested memory %s is greater than limit %s`, cpuReservation, memoryReservation, cpuLimit, memoryLimit, cpuReservation, cpuLimit, cpuReservation, memoryReservation, cpuLimit, memoryLimit, memoryReservation, memoryLimit) + require.Regexp(pcrl.T(), regexp.MustCompile(pattern), err.Error()) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestUpdateProjectWithCpuLimitLessThanRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project (with valid container default resource limit) and a namespace in the project.") + cpuLimit := "100m" + cpuReservation := "50m" + memoryLimit := "64Mi" + memoryReservation := "32Mi" + + createdProject, createdNamespace, err := createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Verify that the container default resource limit in the Project spec is accurate.") + projectSpec := createdProject.Spec.ContainerDefaultResourceLimit + require.Equal(pcrl.T(), cpuLimit, projectSpec.LimitsCPU, "CPU limit mismatch") + 
require.Equal(pcrl.T(), cpuReservation, projectSpec.RequestsCPU, "CPU reservation mismatch") + require.Equal(pcrl.T(), memoryLimit, projectSpec.LimitsMemory, "Memory limit mismatch") + require.Equal(pcrl.T(), memoryReservation, projectSpec.RequestsMemory, "Memory reservation mismatch") + + log.Info("Verify that the limit range object is created for the namespace and the resource limit in the limit range is accurate.") + err = checkLimitRange(standardUserClient, pcrl.cluster.ID, createdNamespace.Name, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Update the project with CPU request set greater than the CPU limit. Verify that the webhook rejects the request.") + cpuLimit = "100m" + cpuReservation = "200m" + memoryLimit = "" + memoryReservation = "" + _, err = updateProjectContainerResourceLimit(standardUserClient, createdProject, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.Error(pcrl.T(), err) + pattern := fmt.Sprintf(`admission webhook "rancher.cattle.io.projects.management.cattle.io" denied the request: project.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested CPU %s is greater than limit %s`, cpuReservation, memoryReservation, cpuLimit, memoryLimit, cpuReservation, cpuLimit) + require.Regexp(pcrl.T(), regexp.MustCompile(pattern), err.Error()) +} + +func (pcrl *ProjectsContainerResourceLimitTestSuite) TestUpdateProjectWithMemoryLimitLessThanRequest() { + subSession := pcrl.session.NewSession() + defer subSession.Cleanup() + + standardUserClient, _ := pcrl.setupUserForProject() + + log.Info("Create a project (with valid container default resource limit) and a namespace in the project.") + cpuLimit := "100m" + cpuReservation := "50m" + memoryLimit := "64Mi" + memoryReservation := "32Mi" + + createdProject, createdNamespace, err := 
createProjectAndNamespaceWithLimits(standardUserClient, pcrl.cluster.ID, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Verify that the container default resource limit in the Project spec is accurate.") + projectSpec := createdProject.Spec.ContainerDefaultResourceLimit + require.Equal(pcrl.T(), cpuLimit, projectSpec.LimitsCPU, "CPU limit mismatch") + require.Equal(pcrl.T(), cpuReservation, projectSpec.RequestsCPU, "CPU reservation mismatch") + require.Equal(pcrl.T(), memoryLimit, projectSpec.LimitsMemory, "Memory limit mismatch") + require.Equal(pcrl.T(), memoryReservation, projectSpec.RequestsMemory, "Memory reservation mismatch") + + log.Info("Verify that the limit range object is created for the namespace and the resource limit in the limit range is accurate.") + err = checkLimitRange(standardUserClient, pcrl.cluster.ID, createdNamespace.Name, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.NoError(pcrl.T(), err) + + log.Info("Update the project with Memory request set greater than Memory limit. 
Verify that the webhook rejects the request.") + cpuLimit = "" + cpuReservation = "" + memoryLimit = "32Mi" + memoryReservation = "64Mi" + _, err = updateProjectContainerResourceLimit(standardUserClient, createdProject, cpuLimit, cpuReservation, memoryLimit, memoryReservation) + require.Error(pcrl.T(), err) + pattern := fmt.Sprintf(`admission webhook "rancher.cattle.io.projects.management.cattle.io" denied the request: project.spec.containerDefaultResourceLimit: Invalid value: v3.ContainerResourceLimit{RequestsCPU:"%s", RequestsMemory:"%s", LimitsCPU:"%s", LimitsMemory:"%s"}: requested memory %s is greater than limit %s`, cpuReservation, memoryReservation, cpuLimit, memoryLimit, memoryReservation, memoryLimit) + require.Regexp(pcrl.T(), regexp.MustCompile(pattern), err.Error()) +} + +func TestProjectsContainerResourceLimitTestSuite(t *testing.T) { + suite.Run(t, new(ProjectsContainerResourceLimitTestSuite)) +} diff --git a/tests/v2/validation/projects/projects_test.go b/tests/v2/validation/projects/projects_test.go index 93719e0846d..8ecf263c303 100644 --- a/tests/v2/validation/projects/projects_test.go +++ b/tests/v2/validation/projects/projects_test.go @@ -10,11 +10,10 @@ import ( "github.com/rancher/rancher/tests/v2/actions/kubeapi/projects" project "github.com/rancher/rancher/tests/v2/actions/projects" rbac "github.com/rancher/rancher/tests/v2/actions/rbac" - deployment "github.com/rancher/rancher/tests/v2/actions/workloads/deployment" "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/charts" "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/users" "github.com/rancher/shepherd/pkg/session" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" @@ -96,8 +95,12 @@ func (pr *ProjectsTestSuite) TestProjectsCrudDownstreamCluster() { defer subSession.Cleanup() log.Info("Create a standard user 
and add the user to the downstream cluster as cluster owner.") - _, standardUserClient, err := rbac.AddUserWithRoleToCluster(pr.client, rbac.StandardUser.String(), rbac.ClusterOwner.String(), pr.cluster, nil) + standardUser, err := users.CreateUserWithRole(pr.client, users.UserConfig(), projects.StandardUser) + require.NoError(pr.T(), err, "Failed to create standard user") + standardUserClient, err := pr.client.AsUser(standardUser) require.NoError(pr.T(), err) + err = users.AddClusterRoleToUser(pr.client, pr.cluster, standardUser, rbac.ClusterOwner.String(), nil) + require.NoError(pr.T(), err, "Failed to add the user as a cluster owner to the downstream cluster") log.Info("Create a project in the downstream cluster and verify that the project can be listed.") projectTemplate := projects.NewProjectTemplate(pr.cluster.ID) @@ -167,47 +170,17 @@ func (pr *ProjectsTestSuite) TestDeleteSystemProject() { require.Equal(pr.T(), expectedErrorMessage, err.Error()) } -func (pr *ProjectsTestSuite) TestProjectWithoutResourceQuota() { - subSession := pr.session.NewSession() - defer subSession.Cleanup() - - log.Info("Create a standard user and add the user to the downstream cluster as cluster owner.") - _, standardUserClient, err := rbac.AddUserWithRoleToCluster(pr.client, rbac.StandardUser.String(), rbac.ClusterOwner.String(), pr.cluster, nil) - require.NoError(pr.T(), err) - - log.Info("Create a project (without any resource quota) and a namespace in the project.") - projectTemplate := projects.NewProjectTemplate(pr.cluster.ID) - createdProject, createdNamespace, err := createProjectAndNamespace(standardUserClient, pr.cluster.ID, projectTemplate) - require.NoError(pr.T(), err) - - log.Info("Verify that the namespace has the label and annotation referencing the project.") - updatedNamespace, err := namespaces.GetNamespaceByName(standardUserClient, pr.cluster.ID, createdNamespace.Name) - require.NoError(pr.T(), err) - err = checkNamespaceLabelsAndAnnotations(pr.cluster.ID, 
createdProject.Name, updatedNamespace) - require.NoError(pr.T(), err) - - log.Info("Verify that the namespace does not have the annotation: field.cattle.io/resourceQuota.") - err = checkAnnotationExistsInNamespace(standardUserClient, pr.cluster.ID, updatedNamespace.Name, resourceQuotaAnnotation, false) - require.NoError(pr.T(), err, "'field.cattle.io/resourceQuota' annotation should not exist") - - log.Info("Create a deployment in the namespace with ten replicas.") - deployment, err := deployment.CreateDeployment(standardUserClient, pr.cluster.ID, updatedNamespace.Name, 10, "", "", false, false) - require.NoError(pr.T(), err, "Failed to create deployment in the namespace") - - log.Info("Verify that there are ten pods created in the deployment and they are in Running state.") - err = charts.WatchAndWaitDeployments(standardUserClient, pr.cluster.ID, updatedNamespace.Name, metav1.ListOptions{ - FieldSelector: "metadata.name=" + deployment.Name, - }) - require.NoError(pr.T(), err) -} - func (pr *ProjectsTestSuite) TestMoveNamespaceOutOfProject() { subSession := pr.session.NewSession() defer subSession.Cleanup() log.Info("Create a standard user and add the user to the downstream cluster as cluster owner.") - _, standardUserClient, err := rbac.AddUserWithRoleToCluster(pr.client, rbac.StandardUser.String(), rbac.ClusterOwner.String(), pr.cluster, nil) + standardUser, err := users.CreateUserWithRole(pr.client, users.UserConfig(), projects.StandardUser) + require.NoError(pr.T(), err, "Failed to create standard user") + standardUserClient, err := pr.client.AsUser(standardUser) require.NoError(pr.T(), err) + err = users.AddClusterRoleToUser(pr.client, pr.cluster, standardUser, rbac.ClusterOwner.String(), nil) + require.NoError(pr.T(), err, "Failed to add the user as a cluster owner to the downstream cluster") log.Info("Create a project in the downstream cluster and a namespace in the project.") projectTemplate := projects.NewProjectTemplate(pr.cluster.ID) @@ -240,76 +213,6 @@ 
func (pr *ProjectsTestSuite) TestMoveNamespaceOutOfProject() { require.Error(pr.T(), err) } -func (pr *ProjectsTestSuite) TestMoveNamespaceBetweenProjectsWithNoResourceQuota() { - subSession := pr.session.NewSession() - defer subSession.Cleanup() - - log.Info("Create a standard user and add the user to the downstream cluster as cluster owner.") - _, standardUserClient, err := rbac.AddUserWithRoleToCluster(pr.client, rbac.StandardUser.String(), rbac.ClusterOwner.String(), pr.cluster, nil) - require.NoError(pr.T(), err) - - log.Info("Create a project in the downstream cluster and a namespace in the project.") - projectTemplate := projects.NewProjectTemplate(pr.cluster.ID) - projectTemplate.Spec.NamespaceDefaultResourceQuota.Limit.Pods = "" - projectTemplate.Spec.ResourceQuota.Limit.Pods = "" - createdProject, createdNamespace, err := createProjectAndNamespace(standardUserClient, pr.cluster.ID, projectTemplate) - require.NoError(pr.T(), err) - - log.Info("Verify that the namespace has the label and annotation referencing the project.") - updatedNamespace, err := namespaces.GetNamespaceByName(standardUserClient, pr.cluster.ID, createdNamespace.Name) - require.NoError(pr.T(), err) - err = checkNamespaceLabelsAndAnnotations(pr.cluster.ID, createdProject.Name, updatedNamespace) - require.NoError(pr.T(), err) - - log.Info("Verify that the namespace does not have the annotation: field.cattle.io/resourceQuota.") - err = checkAnnotationExistsInNamespace(standardUserClient, pr.cluster.ID, updatedNamespace.Name, resourceQuotaAnnotation, false) - require.NoError(pr.T(), err, "'field.cattle.io/resourceQuota' annotation should not exist") - - log.Info("Create a deployment in the namespace with ten replicas.") - deployment, err := deployment.CreateDeployment(standardUserClient, createdProject.Namespace, updatedNamespace.Name, 10, "", "", false, false) - require.NoError(pr.T(), err, "Failed to create deployment in the namespace") - - log.Info("Verify that there are ten pods created 
in the deployment and they are in Running state.") - err = charts.WatchAndWaitDeployments(standardUserClient, createdProject.Namespace, updatedNamespace.Name, metav1.ListOptions{ - FieldSelector: "metadata.name=" + deployment.Name, - }) - require.NoError(pr.T(), err) - - log.Info("Create another project in the downstream cluster.") - projectTemplate = projects.NewProjectTemplate(pr.cluster.ID) - createdProject2, err := standardUserClient.WranglerContext.Mgmt.Project().Create(projectTemplate) - require.NoError(pr.T(), err, "Failed to create project") - err = project.WaitForProjectFinalizerToUpdate(pr.client, createdProject2.Name, createdProject2.Namespace, 2) - require.NoError(pr.T(), err) - - log.Info("Move the namespace from the first project to the second project.") - currentNamespace, err := namespaces.GetNamespaceByName(standardUserClient, pr.cluster.ID, updatedNamespace.Name) - require.NoError(pr.T(), err) - downstreamContext, err := pr.client.WranglerContext.DownStreamClusterWranglerContext(pr.cluster.ID) - require.NoError(pr.T(), err) - - updatedNamespace.Annotations[projects.ProjectIDAnnotation] = createdProject2.Namespace + ":" + createdProject2.Name - updatedNamespace.ResourceVersion = currentNamespace.ResourceVersion - _, err = downstreamContext.Core.Namespace().Update(updatedNamespace) - require.NoError(pr.T(), err) - - log.Info("Verify that the namespace has the correct label and annotation referencing the second project.") - movedNamespace, err := namespaces.GetNamespaceByName(standardUserClient, pr.cluster.ID, updatedNamespace.Name) - require.NoError(pr.T(), err) - err = checkNamespaceLabelsAndAnnotations(pr.cluster.ID, createdProject2.Name, movedNamespace) - require.NoError(pr.T(), err) - - log.Info("Verify that the namespace does not have the annotation: field.cattle.io/resourceQuota.") - err = checkAnnotationExistsInNamespace(standardUserClient, pr.cluster.ID, updatedNamespace.Name, resourceQuotaAnnotation, false) - require.NoError(pr.T(), err, 
"'field.cattle.io/resourceQuota' annotation should not exist") - - log.Info("Verify that the deployment is in Active state and all pods in the deployment are in Running state.") - err = charts.WatchAndWaitDeployments(standardUserClient, pr.cluster.ID, movedNamespace.Name, metav1.ListOptions{ - FieldSelector: "metadata.name=" + deployment.Name, - }) - require.NoError(pr.T(), err) -} - func TestProjectsTestSuite(t *testing.T) { suite.Run(t, new(ProjectsTestSuite)) }