diff --git a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/daemonset.go b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/daemonset.go
index 3336db75b6a..6d1268d650a 100644
--- a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/daemonset.go
+++ b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/daemonset.go
@@ -93,7 +93,18 @@ func (k *Kubernetes) updateDaemonSet(ctx context.Context, ds *appsv1.DaemonSet,
 			return errors.New(reason)
 		}
 	}
-	return k.ds.Update(ds)
+	if err = k.ds.Update(ds); err != nil {
+		logrus.Errorf("failed to update daemonset, name: %s, (%v)", ds.Name, err)
+		return err
+	}
+	if service.K8SSnippet == nil || service.K8SSnippet.Container == nil {
+		return nil
+	}
+	if err = k.ds.Patch(ds.Namespace, ds.Name, service.Name, (corev1.Container)(*service.K8SSnippet.Container)); err != nil {
+		logrus.Errorf("failed to patch daemonset, name: %s, (%v)", ds.Name, err)
+		return err
+	}
+	return nil
 }
 
 func (k *Kubernetes) getDaemonSetDeltaResource(ctx context.Context, ds *appsv1.DaemonSet) (deltaCPU, deltaMemory int64, err error) {
@@ -311,5 +322,7 @@ func (k *Kubernetes) newDaemonSet(service *apistructs.Service, sg *apistructs.Se
 
 	logrus.Debugf("show k8s daemonset, name: %s, daemonset: %+v", daemonSetName, daemonset)
 
+	k.runAsDefaultUser(&daemonset.Spec.Template.Spec)
+
 	return daemonset, nil
 }
diff --git a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/deployment.go b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/deployment.go
index b5ddb58aa6d..e34436a2a0e 100644
--- a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/deployment.go
+++ b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/deployment.go
@@ -688,6 +688,8 @@ func (k *Kubernetes) newDeployment(service *apistructs.Service, serviceGroup *ap
 		return nil, err
 	}
 	logrus.Debugf("show k8s deployment, name: %s, deployment: %+v", deploymentName, deployment)
+
+	k.runAsDefaultUser(&deployment.Spec.Template.Spec)
 	return deployment, nil
 }
 
diff --git a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/job.go b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/job.go
index 97461988e06..1cb14be00d7 100644
--- a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/job.go
+++ b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/job.go
@@ -187,6 +187,8 @@ func (k *Kubernetes) newJob(service *apistructs.Service, serviceGroup *apistruct
 	k.AddSpotEmptyDir(&job.Spec.Template.Spec, service.Resources.EmptyDirCapacity)
 
 	job.Spec.Template.Spec.RestartPolicy = apiv1.RestartPolicyNever
+
+	k.runAsDefaultUser(&job.Spec.Template.Spec)
 	return job, nil
 }
 
diff --git a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/k8s.go b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/k8s.go
index 6ed74201327..6459a4d9795 100644
--- a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/k8s.go
+++ b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/k8s.go
@@ -2058,3 +2058,24 @@ func (k *Kubernetes) DeployInEdgeCluster() bool {
 
 	return true
 }
+
+// runAsDefaultUser applies the default non-root user and group to every container that does not set its own.
+func (k *Kubernetes) runAsDefaultUser(spec *apiv1.PodSpec) {
+	if spec == nil || spec.Containers == nil {
+		logrus.WithField("PodSpec", spec).Info("invalid PodSpec, skipping default user configuration")
+		return
+	}
+	for i := range spec.Containers {
+		if spec.Containers[i].SecurityContext == nil {
+			spec.Containers[i].SecurityContext = &apiv1.SecurityContext{}
+		}
+
+		if spec.Containers[i].SecurityContext.RunAsUser == nil {
+			spec.Containers[i].SecurityContext.RunAsUser = &types.DefaultContainerUserId
+		}
+
+		if spec.Containers[i].SecurityContext.RunAsGroup == nil {
+			spec.Containers[i].SecurityContext.RunAsGroup = &types.DefaultContainerGroupId
+		}
+	}
+}
diff --git a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/k8s_test.go b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/k8s_test.go
index 71db78019fb..1d71e0406da 100644
--- a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/k8s_test.go
+++ b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/k8s_test.go
@@ -603,3 +603,12 @@ func Test_ConvertToKedaScaledObject(t *testing.T) {
 	object := convertToKedaScaledObject(scaled)
 	t.Logf("%v", object)
 }
+
+func TestRunAsUser(t *testing.T) {
+	kubernetes := Kubernetes{}
+	kubernetes.runAsDefaultUser(&apiv1.PodSpec{
+		Containers: []apiv1.Container{
+			{Name: "test_container"},
+		},
+	})
+}
diff --git a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/types/type.go b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/types/type.go
index 668afac40ad..a880114276c 100644
--- a/internal/tools/orchestrator/scheduler/executor/plugins/k8s/types/type.go
+++ b/internal/tools/orchestrator/scheduler/executor/plugins/k8s/types/type.go
@@ -42,6 +42,11 @@ const (
 	DiceWorkSpace = "DICE_WORKSPACE"
 )
 
+var (
+	DefaultContainerUserId  int64 = 1000 // `dice` user
+	DefaultContainerGroupId int64 = 1000 // `dice` group
+)
+
 var EnvReg = regexp.MustCompile(`\$\{([^}]+?)\}`)
 
 type StatefulsetInfo struct {
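For illustration, a minimal standalone sketch of the defaulting behavior introduced above. This is not the plugin code itself: the hard-coded UID/GID 1000 stand in for types.DefaultContainerUserId and types.DefaultContainerGroupId, and the apiv1 alias mirrors the plugin's import of k8s.io/api/core/v1. The point to note is the merge semantics: defaults are applied only where RunAsUser/RunAsGroup are nil, so a container that pins its own user keeps it.

// Hypothetical demo, not part of the diff above.
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
)

// Stand-ins for types.DefaultContainerUserId / types.DefaultContainerGroupId (`dice` user/group).
var defaultUID, defaultGID int64 = 1000, 1000

// runAsDefaultUser fills in RunAsUser/RunAsGroup only where they are unset,
// so containers that already specify a user keep their explicit setting.
func runAsDefaultUser(spec *apiv1.PodSpec) {
	if spec == nil || spec.Containers == nil {
		return
	}
	for i := range spec.Containers {
		c := &spec.Containers[i]
		if c.SecurityContext == nil {
			c.SecurityContext = &apiv1.SecurityContext{}
		}
		if c.SecurityContext.RunAsUser == nil {
			c.SecurityContext.RunAsUser = &defaultUID
		}
		if c.SecurityContext.RunAsGroup == nil {
			c.SecurityContext.RunAsGroup = &defaultGID
		}
	}
}

func main() {
	root := int64(0)
	spec := &apiv1.PodSpec{
		Containers: []apiv1.Container{
			{Name: "app"}, // no SecurityContext: receives 1000/1000
			{Name: "sidecar", SecurityContext: &apiv1.SecurityContext{RunAsUser: &root}}, // keeps uid 0
		},
	}
	runAsDefaultUser(spec)
	for _, c := range spec.Containers {
		fmt.Printf("%s: uid=%d gid=%d\n", c.Name, *c.SecurityContext.RunAsUser, *c.SecurityContext.RunAsGroup)
	}
}

Expected output: "app" runs as 1000/1000, while "sidecar" keeps uid 0 and only its unset gid is defaulted, matching the nil-checks in runAsDefaultUser.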