diff --git a/test/e2e/user_group_limit/user_group_limit_test.go b/test/e2e/user_group_limit/user_group_limit_test.go
index 9d4297215..a6f4dbb2b 100644
--- a/test/e2e/user_group_limit/user_group_limit_test.go
+++ b/test/e2e/user_group_limit/user_group_limit_test.go
@@ -44,14 +44,16 @@ import (
 type TestType int
 
 const (
-	largeMem    = 100
-	mediumMem   = 50
-	smallMem    = 30
-	sleepPodMem = 99
-	user1       = "user1"
-	user2       = "user2"
-	group1      = "group1"
-	group2      = "group2"
+	largeMem      = 100
+	mediumMem     = 50
+	smallMem      = 30
+	sleepPodMem   = 99
+	user1         = "user1"
+	user2         = "user2"
+	group1        = "group1"
+	group2        = "group2"
+	sandboxQueue1 = "root.sandbox1"
+	sandboxQueue2 = "root.sandbox2"
 
 	userTestType TestType = iota
 	groupTestType
@@ -110,7 +112,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -125,7 +127,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				}); err != nil {
 					return err
 				}
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
 			})
 		})
 
@@ -133,20 +135,20 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
 
 		// usergroup1 can't deploy the second sleep pod to root.sandbox1
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because memory usage is less than maxresources")
-		deploySleepPod(usergroup1, "root.sandbox1", false, "because final memory usage is more than maxresources")
-		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because memory usage is less than maxresources")
+		deploySleepPod(usergroup1, sandboxQueue1, false, "because final memory usage is more than maxresources")
+		checkUsage(userTestType, user1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
 
 		// usergroup1 can deploy 2 sleep pods to root.sandbox2
-		usergroup1Sandbox2Pod1 := deploySleepPod(usergroup1, "root.sandbox2", true, "because there is no limit in root.sandbox2")
-		usergroup1Sandbox2Pod2 := deploySleepPod(usergroup1, "root.sandbox2", true, "because there is no limit in root.sandbox2")
-		checkUsage(userTestType, user1, "root.sandbox2", []*v1.Pod{usergroup1Sandbox2Pod1, usergroup1Sandbox2Pod2})
+		usergroup1Sandbox2Pod1 := deploySleepPod(usergroup1, sandboxQueue2, true, "because there is no limit in root.sandbox2")
+		usergroup1Sandbox2Pod2 := deploySleepPod(usergroup1, sandboxQueue2, true, "because there is no limit in root.sandbox2")
+		checkUsage(userTestType, user1, sandboxQueue2, []*v1.Pod{usergroup1Sandbox2Pod1, usergroup1Sandbox2Pod2})
 
 		// usergroup2 can deploy 2 sleep pods to root.sandbox1
 		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
-		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, fmt.Sprintf("because there is no limit for %s", usergroup2))
-		usergroup2Sandbox1Pod2 := deploySleepPod(usergroup2, "root.sandbox1", true, fmt.Sprintf("because there is no limit for %s", usergroup2))
-		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1, usergroup2Sandbox1Pod2})
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, sandboxQueue1, true, fmt.Sprintf("because there is no limit for %s", usergroup2))
+		usergroup2Sandbox1Pod2 := deploySleepPod(usergroup2, sandboxQueue1, true, fmt.Sprintf("because there is no limit for %s", usergroup2))
+		checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1, usergroup2Sandbox1Pod2})
 	})
 
 	ginkgo.It("Verify_maxapplications_with_a_specific_user_limit", func() {
@@ -158,7 +160,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -173,7 +175,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				}); err != nil {
 					return err
 				}
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
 			})
 		})
 
@@ -181,20 +183,20 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
 
 		// usergroup1 can't deploy the second sleep pod to root.sandbox1
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because application count is less than maxapplications")
-		deploySleepPod(usergroup1, "root.sandbox1", false, "because final application count is more than maxapplications")
-		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because application count is less than maxapplications")
+		deploySleepPod(usergroup1, sandboxQueue1, false, "because final application count is more than maxapplications")
+		checkUsage(userTestType, user1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
 
 		// usergroup1 can deploy 2 sleep pods to root.sandbox2
-		usergroup1Sandbox2Pod1 := deploySleepPod(usergroup1, "root.sandbox2", true, "because there is no limit in root.sandbox2")
-		usergroup1Sandbox2Pod2 := deploySleepPod(usergroup1, "root.sandbox2", true, "because there is no limit in root.sandbox2")
-		checkUsage(userTestType, user1, "root.sandbox2", []*v1.Pod{usergroup1Sandbox2Pod1, usergroup1Sandbox2Pod2})
+		usergroup1Sandbox2Pod1 := deploySleepPod(usergroup1, sandboxQueue2, true, "because there is no limit in root.sandbox2")
+		usergroup1Sandbox2Pod2 := deploySleepPod(usergroup1, sandboxQueue2, true, "because there is no limit in root.sandbox2")
+		checkUsage(userTestType, user1, sandboxQueue2, []*v1.Pod{usergroup1Sandbox2Pod1, usergroup1Sandbox2Pod2})
 
 		// usergroup2 can deploy 2 sleep pods to root.sandbox1
 		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
-		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, fmt.Sprintf("because there is no limit for %s", usergroup2))
-		usergroup2Sandbox1Pod2 := deploySleepPod(usergroup2, "root.sandbox1", true, fmt.Sprintf("because there is no limit for %s", usergroup2))
-		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1, usergroup2Sandbox1Pod2})
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, sandboxQueue1, true, fmt.Sprintf("because there is no limit for %s", usergroup2))
+		usergroup2Sandbox1Pod2 := deploySleepPod(usergroup2, sandboxQueue1, true, fmt.Sprintf("because there is no limit for %s", usergroup2))
+		checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1, usergroup2Sandbox1Pod2})
 	})
 
 	ginkgo.It("Verify_maxresources_with_a_specific_group_limit", func() {
@@ -206,7 +208,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -221,7 +223,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				}); err != nil {
 					return err
 				}
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
 			})
 		})
 
@@ -229,18 +231,18 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
 
 		// usergroup1 can't deploy the second sleep pod to root.sandbox1
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because memory usage is less than maxresources")
-		_ = deploySleepPod(usergroup1, "root.sandbox1", false, "because final memory usage is more than maxresources")
-		checkUsage(groupTestType, group1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because memory usage is less than maxresources")
+		_ = deploySleepPod(usergroup1, sandboxQueue1, false, "because final memory usage is more than maxresources")
+		checkUsage(groupTestType, group1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
 
 		// usergroup1 can deploy 2 sleep pods to root.sandbox2
-		deploySleepPod(usergroup1, "root.sandbox2", true, "because there is no limit in root.sandbox2")
-		deploySleepPod(usergroup1, "root.sandbox2", true, "because there is no limit in root.sandbox2")
+		deploySleepPod(usergroup1, sandboxQueue2, true, "because there is no limit in root.sandbox2")
+		deploySleepPod(usergroup1, sandboxQueue2, true, "because there is no limit in root.sandbox2")
 
 		// usergroup2 can deploy 2 sleep pods to root.sandbox1
 		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
-		deploySleepPod(usergroup2, "root.sandbox1", true, fmt.Sprintf("because there is no limit for %s", usergroup2))
-		deploySleepPod(usergroup2, "root.sandbox1", true, fmt.Sprintf("because there is no limit for %s", usergroup2))
+		deploySleepPod(usergroup2, sandboxQueue1, true, fmt.Sprintf("because there is no limit for %s", usergroup2))
+		deploySleepPod(usergroup2, sandboxQueue1, true, fmt.Sprintf("because there is no limit for %s", usergroup2))
 	})
 
 	ginkgo.It("Verify_maxapplications_with_a_specific_group_limit", func() {
@@ -252,7 +254,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -267,7 +269,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				}); err != nil {
 					return err
 				}
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
 			})
 		})
 
@@ -275,18 +277,18 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
 
 		// usergroup1 can't deploy the second sleep pod to root.sandbox1
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because application count is less than maxapplications")
-		_ = deploySleepPod(usergroup1, "root.sandbox1", false, "because final application count is more than maxapplications")
-		checkUsage(groupTestType, group1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because application count is less than maxapplications")
+		_ = deploySleepPod(usergroup1, sandboxQueue1, false, "because final application count is more than maxapplications")
+		checkUsage(groupTestType, group1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
 
 		// usergroup1 can deploy 2 sleep pods to root.sandbox2
-		deploySleepPod(usergroup1, "root.sandbox2", true, "because there is no limit in root.sandbox2")
-		deploySleepPod(usergroup1, "root.sandbox2", true, "because there is no limit in root.sandbox2")
+		deploySleepPod(usergroup1, sandboxQueue2, true, "because there is no limit in root.sandbox2")
+		deploySleepPod(usergroup1, sandboxQueue2, true, "because there is no limit in root.sandbox2")
 
 		// usergroup2 can deploy 2 sleep pods to root.sandbox1
 		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
-		deploySleepPod(usergroup2, "root.sandbox1", true, fmt.Sprintf("because there is no limit for %s", usergroup2))
-		deploySleepPod(usergroup2, "root.sandbox1", true, fmt.Sprintf("because there is no limit for %s", usergroup2))
+		deploySleepPod(usergroup2, sandboxQueue1, true, fmt.Sprintf("because there is no limit for %s", usergroup2))
+		deploySleepPod(usergroup2, sandboxQueue1, true, fmt.Sprintf("because there is no limit for %s", usergroup2))
 	})
 
 	ginkgo.It("Verify_maxresources_with_user_limit_lower_than_group_limit", func() {
@@ -298,7 +300,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -321,7 +323,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				}); err != nil {
 					return err
 				}
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
 			})
 		})
 
@@ -329,9 +331,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
 
 		// usergroup1 can't deploy the second sleep pod to root.sandbox1
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because memory usage is less than maxresources")
-		deploySleepPod(usergroup1, "root.sandbox1", false, "because final memory usage is more than maxresources in user limit")
-		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because memory usage is less than maxresources")
+		deploySleepPod(usergroup1, sandboxQueue1, false, "because final memory usage is more than maxresources in user limit")
+		checkUsage(userTestType, user1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
 	})
 
 	ginkgo.It("Verify_maxresources_with_group_limit_lower_than_user_limit", func() {
@@ -343,7 +345,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				if err := common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -366,7 +368,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				}); err != nil {
 					return err
 				}
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{Name: "sandbox2"})
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
 			})
 		})
 
@@ -374,9 +376,9 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
 
 		// usergroup1 can't deploy the second sleep pod to root.sandbox1
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because memory usage is less than maxresources")
-		_ = deploySleepPod(usergroup1, "root.sandbox1", false, "because final memory usage is more than maxresources in group limit")
-		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because memory usage is less than maxresources")
+		_ = deploySleepPod(usergroup1, sandboxQueue1, false, "because final memory usage is more than maxresources in group limit")
+		checkUsage(userTestType, user1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
 	})
 
 	ginkgo.It("Verify_maxresources_with_a_wildcard_user_limit", func() {
@@ -388,7 +390,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -414,17 +416,17 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 
 		// usergroup1 can deploy 2 sleep pods to root.sandbox1
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
-		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
-		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because usage is less than user entry limit")
+		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, sandboxQueue1, true, "because usage is equal to user entry limit")
+		checkUsage(userTestType, user1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
 
 		// usergroup2 can deploy 1 sleep pod to root.sandbox1
 		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
-		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, sandboxQueue1, true, "because usage is less than wildcard user entry limit")
 
 		// usergroup2 can't deploy the second sleep pod to root.sandbox1
-		deploySleepPod(usergroup2, "root.sandbox1", false, "because final memory usage is more than wildcard maxresources")
-		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+		deploySleepPod(usergroup2, sandboxQueue1, false, "because final memory usage is more than wildcard maxresources")
+		checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1})
 	})
 
 	ginkgo.It("Verify_maxapplications_with_a_wildcard_user_limit", func() {
@@ -436,7 +438,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -462,17 +464,17 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 
 		// usergroup1 can deploy 2 sleep pods to root.sandbox1
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
-		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
-		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because usage is less than user entry limit")
+		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, sandboxQueue1, true, "because usage is equal to user entry limit")
+		checkUsage(userTestType, user1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
 
 		// usergroup2 can deploy 1 sleep pod to root.sandbox1
 		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
-		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, sandboxQueue1, true, "because usage is less than wildcard user entry limit")
 
 		// usergroup2 can't deploy the second sleep pod to root.sandbox1
-		deploySleepPod(usergroup2, "root.sandbox1", false, "because final application count is more than wildcard maxapplications")
-		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+		deploySleepPod(usergroup2, sandboxQueue1, false, "because final application count is more than wildcard maxapplications")
+		checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1})
 	})
 
 	ginkgo.It("Verify_maxresources_with_a_wildcard_group_limit", func() {
@@ -484,7 +486,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -510,17 +512,17 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 
 		// usergroup1 can deploy 2 sleep pods to root.sandbox1
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
-		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
-		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because usage is less than user entry limit")
+		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, sandboxQueue1, true, "because usage is equal to user entry limit")
+		checkUsage(userTestType, user1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
 
 		// usergroup2 can deploy 1 sleep pod to root.sandbox1
 		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
-		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, sandboxQueue1, true, "because usage is less than wildcard user entry limit")
 
 		// usergroup2 can't deploy the second sleep pod to root.sandbox1
-		deploySleepPod(usergroup2, "root.sandbox1", false, "because final memory usage is more than wildcard maxresources")
-		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+		deploySleepPod(usergroup2, sandboxQueue1, false, "because final memory usage is more than wildcard maxresources")
+		checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1})
 	})
 
 	ginkgo.It("Verify_maxapplications_with_a_wildcard_group_limit", func() {
@@ -532,7 +534,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 				// remove placement rules so we can control queue
 				sc.Partitions[0].PlacementRules = nil
 
-				return common.AddQueue(sc, "default", "root", configs.QueueConfig{
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
 					Name: "sandbox1",
 					Limits: []configs.Limit{
 						{
@@ -558,17 +560,17 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 
 		// usergroup1 can deploy 2 sleep pods to root.sandbox1
 		usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
-		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than group entry limit")
-		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to group entry limit")
-		checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
+		usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because usage is less than group entry limit")
+		usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, sandboxQueue1, true, "because usage is equal to group entry limit")
+		checkUsage(userTestType, user1, sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})
 
 		// usergroup2 can deploy 1 sleep pod to root.sandbox1
 		usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
-		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard group entry limit")
+		usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, sandboxQueue1, true, "because usage is less than wildcard group entry limit")
 
 		// usergroup2 can't deploy the second sleep pod to root.sandbox1
-		deploySleepPod(usergroup2, "root.sandbox1", false, "because final application count is more than wildcard maxapplications")
-		checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
+		deploySleepPod(usergroup2, sandboxQueue1, false, "because final application count is more than wildcard maxapplications")
+		checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1})
 	})
 
 	ginkgo.AfterEach(func() {