From ec57ad79b9b3de42bd26c29c96754b9eb7756c8b Mon Sep 17 00:00:00 2001
From: PoAn Yang
Date: Thu, 30 Nov 2023 19:52:04 +0800
Subject: [PATCH] [YUNIKORN-1956] Add wildcard user/group limit e2e tests

Signed-off-by: PoAn Yang
---
 .../user_group_limit/user_group_limit_test.go | 192 ++++++++++++++++++
 1 file changed, 192 insertions(+)

diff --git a/test/e2e/user_group_limit/user_group_limit_test.go b/test/e2e/user_group_limit/user_group_limit_test.go
index 8475cf9a3..9d4297215 100644
--- a/test/e2e/user_group_limit/user_group_limit_test.go
+++ b/test/e2e/user_group_limit/user_group_limit_test.go
@@ -379,6 +379,198 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
 	})
 
+	ginkgo.It("Verify_maxresources_with_a_wildcard_user_limit", func() {
+		ginkgo.By("Update config")
+		annotation = "ann-" + common.RandSeq(10)
+		// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+		yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+			yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+				// remove placement rules so we can control queue
+				sc.Partitions[0].PlacementRules = nil
+
+				return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+					Name: "sandbox1",
+					Limits: []configs.Limit{
+						{
+							Limit: "user entry",
+							Users: []string{user1},
+							MaxApplications: 2,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+							},
+						},
+						{
+							Limit: "wildcard user entry",
+							Users: []string{"*"},
+							MaxApplications: 2,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+							},
+						},
+					},
+				})
+			})
+		})
+
+		// usergroup1 can deploy 2 sleep pods to root.sandbox1
+		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
+		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
+		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+
+		// usergroup2 can deploy 1 sleep pod to root.sandbox1
+		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")
+
+		// usergroup2 can't deploy the second sleep pod to root.sandbox1
+		deploySleepPod(usergroup2, "root.sandbox1", false, "because final memory usage is more than wildcard maxresources")
+		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+	})
+
+	ginkgo.It("Verify_maxapplications_with_a_wildcard_user_limit", func() {
+		ginkgo.By("Update config")
+		annotation = "ann-" + common.RandSeq(10)
+		// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
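+		// For reference, the limits built below correspond roughly to this queues.yaml
+		// snippet (illustrative sketch only; user1, largeMem and the "M" suffix come
+		// from the test's own variables and constants):
+		//   limits:
+		//     - limit: user entry
+		//       users: [<user1>]
+		//       maxapplications: 2
+		//       maxresources: {memory: <largeMem>M}
+		//     - limit: wildcard user entry
+		//       users: ["*"]
+		//       maxapplications: 1
+		//       maxresources: {memory: <largeMem>M}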
+		yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+			yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+				// remove placement rules so we can control queue
+				sc.Partitions[0].PlacementRules = nil
+
+				return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+					Name: "sandbox1",
+					Limits: []configs.Limit{
+						{
+							Limit: "user entry",
+							Users: []string{user1},
+							MaxApplications: 2,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+							},
+						},
+						{
+							Limit: "wildcard user entry",
+							Users: []string{"*"},
+							MaxApplications: 1,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+							},
+						},
+					},
+				})
+			})
+		})
+
+		// usergroup1 can deploy 2 sleep pods to root.sandbox1
+		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
+		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
+		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+
+		// usergroup2 can deploy 1 sleep pod to root.sandbox1
+		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")
+
+		// usergroup2 can't deploy the second sleep pod to root.sandbox1
+		deploySleepPod(usergroup2, "root.sandbox1", false, "because final application count is more than wildcard maxapplications")
+		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+	})
+
+	ginkgo.It("Verify_maxresources_with_a_wildcard_group_limit", func() {
+		ginkgo.By("Update config")
+		annotation = "ann-" + common.RandSeq(10)
+		// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
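+		// What the assertions below are meant to show: members of group1 are governed by
+		// the specific "group entry" limit (largeMem), while any other group falls back to
+		// the "*" wildcard entry (mediumMem), so user2/group2 only fits a single sleep pod.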
+		yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+			yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+				// remove placement rules so we can control queue
+				sc.Partitions[0].PlacementRules = nil
+
+				return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+					Name: "sandbox1",
+					Limits: []configs.Limit{
+						{
+							Limit: "group entry",
+							Groups: []string{group1},
+							MaxApplications: 2,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+							},
+						},
+						{
+							Limit: "wildcard group entry",
+							Groups: []string{"*"},
+							MaxApplications: 2,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+							},
+						},
+					},
+				})
+			})
+		})
+
+		// usergroup1 can deploy 2 sleep pods to root.sandbox1
+		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than group entry limit")
+		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to group entry limit")
+		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+
+		// usergroup2 can deploy 1 sleep pod to root.sandbox1
+		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard group entry limit")
+
+		// usergroup2 can't deploy the second sleep pod to root.sandbox1
+		deploySleepPod(usergroup2, "root.sandbox1", false, "because final memory usage is more than wildcard maxresources")
+		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+	})
+
+	ginkgo.It("Verify_maxapplications_with_a_wildcard_group_limit", func() {
+		ginkgo.By("Update config")
+		annotation = "ann-" + common.RandSeq(10)
+		// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
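+		// Counterpart of the previous test: here the "*" wildcard entry caps groups other
+		// than group1 at a single application, so user2's second sleep pod should be rejected
+		// even though the wildcard memory limit (largeMem) would still have room for it.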
+		yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+			yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+				// remove placement rules so we can control queue
+				sc.Partitions[0].PlacementRules = nil
+
+				return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+					Name: "sandbox1",
+					Limits: []configs.Limit{
+						{
+							Limit: "group entry",
+							Groups: []string{group1},
+							MaxApplications: 2,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+							},
+						},
+						{
+							Limit: "wildcard group entry",
+							Groups: []string{"*"},
+							MaxApplications: 1,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+							},
+						},
+					},
+				})
+			})
+		})
+
+		// usergroup1 can deploy 2 sleep pods to root.sandbox1
+		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than group entry limit")
+		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to group entry limit")
+		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+
+		// usergroup2 can deploy 1 sleep pod to root.sandbox1
+		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard group entry limit")
+
+		// usergroup2 can't deploy the second sleep pod to root.sandbox1
+		deploySleepPod(usergroup2, "root.sandbox1", false, "because final application count is more than wildcard maxapplications")
+		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+	})
+
 	ginkgo.AfterEach(func() {
 		testDescription := ginkgo.CurrentSpecReport()
 		if testDescription.Failed() {