[YUNIKORN-2715] Handle special characters for params like queue, username & groupname (apache#867)

Closes: apache#867

Signed-off-by: Manikandan R <[email protected]>
manirajv06 committed Jul 16, 2024
1 parent e29ba8f commit f281908
Showing 2 changed files with 184 additions and 0 deletions.
109 changes: 109 additions & 0 deletions test/e2e/configmap/configmap_test.go
@@ -24,9 +24,12 @@ import (
"io"
"time"

"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/apache/yunikorn-core/pkg/common/configs"
"github.com/apache/yunikorn-k8shim/pkg/common/constants"
tests "github.com/apache/yunikorn-k8shim/test/e2e"
"github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
"github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
@@ -100,6 +103,112 @@ var _ = Describe("ConfigMap", func() {
checkSchedulerConfig(schedulerConfig)
})

It("Configure the scheduler with an valid queue name", func() {
validConfig := `
partitions:
  - name: default
    placementrules:
      - name: tag
        value: namespace
        create: true
    queues:
      - name: root_Test-a_b_#_c_#_d_/_e@dom:ain
        submitacl: '*'
`
data := map[string]string{"queues.yaml": validConfig}
validConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: constants.ConfigMapName,
Namespace: configmanager.YuniKornTestConfig.YkNamespace,
},
Data: data,
}
cm, err := kClient.UpdateConfigMap(validConfigMap, configmanager.YuniKornTestConfig.YkNamespace)
gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
gomega.Ω(cm).ShouldNot(gomega.BeNil())
})

It("Configure the scheduler with an invalid queue name", func() {
invalidConfig := `
partitions:
  - name: default
    placementrules:
      - name: tag
        value: namespace
        create: true
    queues:
      - name: ro!ot
        submitacl: '*'
`
invalidConfigData := map[string]string{"queues.yaml": invalidConfig}
invalidConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: constants.ConfigMapName,
Namespace: configmanager.YuniKornTestConfig.YkNamespace,
},
Data: invalidConfigData,
}
_, invalidConfigErr := kClient.UpdateConfigMap(invalidConfigMap, configmanager.YuniKornTestConfig.YkNamespace)
gomega.Ω(invalidConfigErr).Should(gomega.HaveOccurred())
})

It("Configure the scheduler with an valid user name in placement rule filter", func() {
validConfig := `
partitions:
  - name: default
    placementrules:
      - name: fixed
        value: root_Test-a_b_#_c_#_d_/_e@dom:ain
        create: true
        filter:
          type: allow
          users:
            - user_Test-a_b_#_c_#_d_/_e@dom:ain.com
    queues:
      - name: root
        submitacl: '*'
`
data := map[string]string{"queues.yaml": validConfig}
validConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: constants.ConfigMapName,
Namespace: configmanager.YuniKornTestConfig.YkNamespace,
},
Data: data,
}
cm, err := kClient.UpdateConfigMap(validConfigMap, configmanager.YuniKornTestConfig.YkNamespace)
gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
gomega.Ω(cm).ShouldNot(gomega.BeNil())
})

It("Configure the scheduler with an invalid user name in placement rule filter", func() {
invalidConfig := `
partitions:
  - name: default
    placementrules:
      - name: fixed
        value: root_Test-a_b_#_c_#_d_/_e@dom:ain
        create: true
        filter:
          type: allow
          users:
            - user_inva!lid
    queues:
      - name: root
        submitacl: '*'
`
invalidConfigData := map[string]string{"queues.yaml": invalidConfig}
invalidConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: constants.ConfigMapName,
Namespace: configmanager.YuniKornTestConfig.YkNamespace,
},
Data: invalidConfigData,
}
_, invalidConfigErr := kClient.UpdateConfigMap(invalidConfigMap, configmanager.YuniKornTestConfig.YkNamespace)
gomega.Ω(invalidConfigErr).Should(gomega.HaveOccurred())
})

AfterEach(func() {
tests.DumpClusterInfoIfSpecFailed(suiteName, []string{"default"})
yunikorn.RestoreConfigMapWrapper(oldConfigMap)
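Taken together, the valid and invalid fixtures above hint at which characters the configuration update now tolerates in queue, user and group names. The following is a minimal sketch that reproduces that acceptance pattern with a character class inferred purely from these fixtures; the authoritative rule lives in yunikorn-core's configs package and may accept or reject additional characters.

package main

import (
	"fmt"
	"regexp"
)

// Illustrative only: a permissive character class inferred from the e2e fixtures
// above, not the real validator used by yunikorn-core.
var nameOK = regexp.MustCompile(`^[A-Za-z0-9_#@/:.-]+$`)

func main() {
	for _, name := range []string{
		"root_Test-a_b_#_c_#_d_/_e@dom:ain",     // accepted by the valid-queue test
		"ro!ot",                                 // rejected by the invalid-queue test
		"user_Test-a_b_#_c_#_d_/_e@dom:ain.com", // accepted in the placement rule filter
		"user_inva!lid",                         // rejected in the placement rule filter
	} {
		fmt.Printf("%-40s valid=%v\n", name, nameOK.MatchString(name))
	}
}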
75 changes: 75 additions & 0 deletions test/e2e/user_group_limit/user_group_limit_test.go
@@ -21,6 +21,7 @@ package user_group_limit_test
import (
"encoding/json"
"fmt"
"net/url"
"runtime"
"time"

@@ -566,6 +567,80 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
checkUsage(userTestType, user2, sandboxQueue1, []*v1.Pod{usergroup2Sandbox1Pod1})
})

ginkgo.It("Verify_maxresources_with_a_valid_user_name_and_specific_user_limit", func() {
ginkgo.By("Update config")
validUser := "user_Test-a_b_#_c_#_d_/_e@dom:ain.com"
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
Limit: "user entry",
Users: []string{validUser},
MaxApplications: 2,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
},
},
},
}); err != nil {
return err
}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})

usergroup1 := &si.UserGroupInformation{User: validUser, Groups: []string{group1}}

// usergroup1 can deploy the first sleep pod to root.sandbox1
usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because memory usage is less than maxresources")
// usergroup1 can't deploy the second sleep pod to root.sandbox1
deploySleepPod(usergroup1, sandboxQueue1, false, "because final memory usage is more than maxresources")
checkUsage(userTestType, url.QueryEscape(validUser), sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
})

ginkgo.It("Verify_maxresources_with_a_valid_group_name_and_specific_group_limit", func() {
ginkgo.By("Update config")
validGroup := "group_Test-a_b_dom:ain.com"
// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
// remove placement rules so we can control queue
sc.Partitions[0].PlacementRules = nil

if err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
Name: "sandbox1",
Limits: []configs.Limit{
{
Limit: "group entry",
Groups: []string{validGroup},
MaxApplications: 2,
MaxResources: map[string]string{
siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
},
},
},
}); err != nil {
return err
}
return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
})
})

usergroup1 := &si.UserGroupInformation{User: user1, Groups: []string{validGroup}}

// usergroup1 can deploy the first sleep pod to root.sandbox1
usergroup1Sandbox1Pod1 := deploySleepPod(usergroup1, sandboxQueue1, true, "because memory usage is less than maxresources")
// usergroup1 can't deploy the second sleep pod to root.sandbox1
_ = deploySleepPod(usergroup1, sandboxQueue1, false, "because final memory usage is more than maxresources")
checkUsage(groupTestType, url.QueryEscape(validGroup), sandboxQueue1, []*v1.Pod{usergroup1Sandbox1Pod1})
})

ginkgo.AfterEach(func() {
tests.DumpClusterInfoIfSpecFailed(suiteName, []string{ns.Name})

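The new limit tests pass the user and group names through url.QueryEscape before calling checkUsage, so characters such as '#', '/', '@' and ':' survive the REST lookup against the scheduler. A minimal, self-contained sketch of that escaping follows; the endpoint path shown is only an assumption for illustration, since the real URL is assembled by the e2e REST helpers.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Names taken from the new test cases above.
	user := "user_Test-a_b_#_c_#_d_/_e@dom:ain.com"
	group := "group_Test-a_b_dom:ain.com"

	// url.QueryEscape turns '#' into %23, '/' into %2F, '@' into %40 and ':' into %3A,
	// which is the form checkUsage now receives.
	fmt.Println(url.QueryEscape(user))
	fmt.Println(url.QueryEscape(group))

	// Hypothetical usage endpoint, shown only to illustrate where the escaped value
	// ends up; the real URL is built inside the e2e REST client.
	fmt.Printf("/ws/v1/partition/default/usage/user/%s\n", url.QueryEscape(user))
}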
