[YUNIKORN-1956] Add wildcard user/group limit e2e tests
Signed-off-by: PoAn Yang <[email protected]>
FrankYang0529 committed Nov 30, 2023
1 parent dc32228 commit ec57ad7
Showing 1 changed file with 192 additions and 0 deletions: test/e2e/user_group_limit/user_group_limit_test.go
@@ -379,6 +379,198 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1})
})

ginkgo.It("Verify_maxresources_with_a_wildcard_user_limit", func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

return common.AddQueue(sc, "default", "root", configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
Limit: "user entry",
Users: []string{user1},
MaxApplications: 2,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
},
},
{
Limit: "wildcard user entry",
Users: []string{"*"},
MaxApplications: 2,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
},
},
},
})
})
})

// usergroup1 can deploy 2 sleep pods to root.sandbox1
usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})

// usergroup2 can deploy 1 sleep pod to root.sandbox1
usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")

// usergroup2 can't deploy the second sleep pod to root.sandbox1
deploySleepPod(usergroup2, "root.sandbox1", false, "because final memory usage is more than wildcard maxresources")
checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
})

ginkgo.It("Verify_maxapplications_with_a_wildcard_user_limit", func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

return common.AddQueue(sc, "default", "root", configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
Limit: "user entry",
Users: []string{user1},
MaxApplications: 2,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
},
},
{
Limit: "wildcard user entry",
Users: []string{"*"},
MaxApplications: 1,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
},
},
},
})
})
})

// usergroup1 can deploy 2 sleep pods to root.sandbox1
usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than user entry limit")
usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to user entry limit")
checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})

// usergroup2 can deploy 1 sleep pod to root.sandbox1
usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard user entry limit")

// usergroup2 can't deploy the second sleep pod to root.sandbox1
deploySleepPod(usergroup2, "root.sandbox1", false, "because final application count is more than wildcard maxapplications")
checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
})

ginkgo.It("Verify_maxresources_with_a_wildcard_group_limit", func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

return common.AddQueue(sc, "default", "root", configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
Limit: "group entry",
Groups: []string{group1},
MaxApplications: 2,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
},
},
{
Limit: "wildcard group entry",
Groups: []string{"*"},
MaxApplications: 2,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
},
},
},
})
})
})

// usergroup1 can deploy 2 sleep pods to root.sandbox1
usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than group entry limit")
usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to group entry limit")
checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})

// usergroup2 can deploy 1 sleep pod to root.sandbox1
usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard group entry limit")

// usergroup2 can't deploy the second sleep pod to root.sandbox1
deploySleepPod(usergroup2, "root.sandbox1", false, "because final memory usage is more than wildcard maxresources")
checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
})

ginkgo.It("Verify_maxapplications_with_a_wildcard_group_limit", func() {
ginkgo.By("Update config")
annotation = "ann-" + common.RandSeq(10)
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", annotation, admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

return common.AddQueue(sc, "default", "root", configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
Limit: "group entry",
Groups: []string{group1},
MaxApplications: 2,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
},
},
{
Limit: "wildcard group entry",
Groups: []string{"*"},
MaxApplications: 1,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
},
},
},
})
})
})

// usergroup1 can deploy 2 sleep pods to root.sandbox1
usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{group1}}
usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is less than group entry limit")
usergroup1Sandbox1Pod2 := deploySleepPod(usergroup1, "root.sandbox1", true, "because usage is equal to group entry limit")
checkUsage(userTestType, user1, "root.sandbox1", []*v1.Pod{usergroup1Sandbox1Pod1, usergroup1Sandbox1Pod2})

// usergroup2 can deploy 1 sleep pod to root.sandbox1
usergroup2 := &si.UserGroupInformation{User: user2, Groups: []string{group2}}
usergroup2Sandbox1Pod1 := deploySleepPod(usergroup2, "root.sandbox1", true, "because usage is less than wildcard group entry limit")

// usergroup2 can't deploy the second sleep pod to root.sandbox1
deploySleepPod(usergroup2, "root.sandbox1", false, "because final application count is more than wildcard maxapplications")
checkUsage(userTestType, user2, "root.sandbox1", []*v1.Pod{usergroup2Sandbox1Pod1})
})

ginkgo.AfterEach(func() {
testDescription := ginkgo.CurrentSpecReport()
if testDescription.Failed() {
