diff --git a/pkg/KubeArmorOperator/cmd/snitch-cmd/main.go b/pkg/KubeArmorOperator/cmd/snitch-cmd/main.go index 62751a68c..59a08c350 100644 --- a/pkg/KubeArmorOperator/cmd/snitch-cmd/main.go +++ b/pkg/KubeArmorOperator/cmd/snitch-cmd/main.go @@ -126,6 +126,8 @@ func snitch() { patchNode.Metadata.Labels[common.EnforcerLabel] = nodeEnforcer patchNode.Metadata.Labels[common.RandLabel] = rand.String(4) patchNode.Metadata.Labels[common.BTFLabel] = btfPresent + patchNode.Metadata.Labels[common.ApparmorFsLabel] = enforcer.CheckIfApparmorFsPresent(PathPrefix, *Logger) + patchNode.Metadata.Labels[common.SecurityFsLabel] = enforcer.CheckIfSecurityFsPresent(PathPrefix, *Logger) patch, err := json.Marshal(patchNode) if err != nil { diff --git a/pkg/KubeArmorOperator/common/defaults.go b/pkg/KubeArmorOperator/common/defaults.go index 7773589c0..978667a7c 100644 --- a/pkg/KubeArmorOperator/common/defaults.go +++ b/pkg/KubeArmorOperator/common/defaults.go @@ -41,13 +41,20 @@ const ( var OperatorConfigCrd *opv1.KubeArmorConfig var ( - EnforcerLabel string = "kubearmor.io/enforcer" - RuntimeLabel string = "kubearmor.io/runtime" - SocketLabel string = "kubearmor.io/socket" - RandLabel string = "kubearmor.io/rand" - OsLabel string = "kubernetes.io/os" - ArchLabel string = "kubernetes.io/arch" - BTFLabel string = "kubearmor.io/btf" + // node labels + EnforcerLabel string = "kubearmor.io/enforcer" + RuntimeLabel string = "kubearmor.io/runtime" + SocketLabel string = "kubearmor.io/socket" + RandLabel string = "kubearmor.io/rand" + OsLabel string = "kubernetes.io/os" + ArchLabel string = "kubernetes.io/arch" + BTFLabel string = "kubearmor.io/btf" + ApparmorFsLabel string = "kubearmor.io/apparmorfs" + SecurityFsLabel string = "kubearmor.io/securityfs" + + // if any node with securityfs/lsm present + IfNodeWithSecurtiyFs bool = false + DeleteAction string = "DELETE" AddAction string = "ADD" Namespace string = "kubearmor" diff --git a/pkg/KubeArmorOperator/enforcer/enforcer.go 
b/pkg/KubeArmorOperator/enforcer/enforcer.go index bd562a558..a8d811c71 100644 --- a/pkg/KubeArmorOperator/enforcer/enforcer.go +++ b/pkg/KubeArmorOperator/enforcer/enforcer.go @@ -27,7 +27,25 @@ func CheckBtfSupport(PathPrefix string, log zap.SugaredLogger) string { return "no" } -// DetectEnforcer: detect the enforcer on the node +// CheckIfApparmorFsPresent checks if the AppArmor filesystem is present +func CheckIfApparmorFsPresent(PathPrefix string, log zap.SugaredLogger) string { + path := PathPrefix + "/etc/apparmor.d" + if _, err := os.Stat(filepath.Clean(path)); err == nil { + return "yes" + } + return "no" +} + +// CheckIfSecurityFsPresent checks if Security filesystem is present +func CheckIfSecurityFsPresent(PathPrefix string, log zap.SugaredLogger) string { + path := PathPrefix + "/sys/kernel/security" + if _, err := os.Stat(filepath.Clean(path)); err == nil { + return "yes" + } + return "no" +} + +// DetectEnforcer detects the enforcer on the node func DetectEnforcer(lsmOrder []string, PathPrefix string, log zap.SugaredLogger) string { supportedLsms := []string{} lsm := []byte{} diff --git a/pkg/KubeArmorOperator/internal/controller/cluster.go b/pkg/KubeArmorOperator/internal/controller/cluster.go index 39359c669..dbc22f8f9 100644 --- a/pkg/KubeArmorOperator/internal/controller/cluster.go +++ b/pkg/KubeArmorOperator/internal/controller/cluster.go @@ -49,6 +49,7 @@ type Node struct { RuntimeSocket string Arch string BTF string + ApparmorFs string } func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, pathPrefix, deploy_name string) *ClusterWatcher { @@ -122,7 +123,9 @@ func (clusterWatcher *ClusterWatcher) WatchNodes() { if val, ok := node.Labels[common.BTFLabel]; ok { newNode.BTF = val } - + if val, ok := node.Labels[common.ApparmorFsLabel]; ok { + newNode.ApparmorFs = val + } clusterWatcher.NodesLock.Lock() nbNodes := len(clusterWatcher.Nodes) i := 0 @@ -147,9 +150,9 @@
func (clusterWatcher *ClusterWatcher) WatchNodes() { } clusterWatcher.NodesLock.Unlock() if nodeModified { - clusterWatcher.UpdateDaemonsets(common.DeleteAction, newNode.Enforcer, newNode.Runtime, newNode.RuntimeSocket, newNode.BTF) + clusterWatcher.UpdateDaemonsets(common.DeleteAction, newNode.Enforcer, newNode.Runtime, newNode.RuntimeSocket, newNode.BTF, newNode.ApparmorFs) } - clusterWatcher.UpdateDaemonsets(common.AddAction, newNode.Enforcer, newNode.Runtime, newNode.RuntimeSocket, newNode.BTF) + clusterWatcher.UpdateDaemonsets(common.AddAction, newNode.Enforcer, newNode.Runtime, newNode.RuntimeSocket, newNode.BTF, newNode.ApparmorFs) } } else { log.Errorf("Cannot convert object to node struct") @@ -168,7 +171,7 @@ func (clusterWatcher *ClusterWatcher) WatchNodes() { } } clusterWatcher.NodesLock.Unlock() - clusterWatcher.UpdateDaemonsets(common.DeleteAction, deletedNode.Enforcer, deletedNode.Runtime, deletedNode.RuntimeSocket, deletedNode.BTF) + clusterWatcher.UpdateDaemonsets(common.DeleteAction, deletedNode.Enforcer, deletedNode.Runtime, deletedNode.RuntimeSocket, deletedNode.BTF, deletedNode.ApparmorFs) } }, }) @@ -176,7 +179,7 @@ func (clusterWatcher *ClusterWatcher) WatchNodes() { nodeInformer.Run(wait.NeverStop) } -func (clusterWatcher *ClusterWatcher) UpdateDaemonsets(action, enforcer, runtime, socket, btfPresent string) { +func (clusterWatcher *ClusterWatcher) UpdateDaemonsets(action, enforcer, runtime, socket, btfPresent, apparmorfs string) { clusterWatcher.Log.Info("updating daemonset") daemonsetName := strings.Join([]string{ "kubearmor", @@ -212,7 +215,7 @@ func (clusterWatcher *ClusterWatcher) UpdateDaemonsets(action, enforcer, runtime } } if newDaemonSet { - daemonset := generateDaemonset(daemonsetName, enforcer, runtime, socket, btfPresent) + daemonset := generateDaemonset(daemonsetName, enforcer, runtime, socket, btfPresent, apparmorfs) _, err := clusterWatcher.Client.AppsV1().DaemonSets(common.Namespace).Create(context.Background(), daemonset, 
v1.CreateOptions{}) if err != nil { clusterWatcher.Log.Warnf("Cannot Create daemonset %s, error=%s", daemonsetName, err.Error()) diff --git a/pkg/KubeArmorOperator/internal/controller/resources.go b/pkg/KubeArmorOperator/internal/controller/resources.go index cc13a8c42..4e85eb617 100644 --- a/pkg/KubeArmorOperator/internal/controller/resources.go +++ b/pkg/KubeArmorOperator/internal/controller/resources.go @@ -23,8 +23,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func generateDaemonset(name, enforcer, runtime, socket, btfPresent string) *appsv1.DaemonSet { - enforcerVolumes, enforcerVolumeMounts := genEnforcerVolumes(enforcer) +func generateDaemonset(name, enforcer, runtime, socket, btfPresent, apparmorfs string) *appsv1.DaemonSet { + enforcerVolumes := []corev1.Volume{} + enforcerVolumeMounts := []corev1.VolumeMount{} + if !(enforcer == "apparmor" && apparmorfs == "no") { + enforcerVolumes, enforcerVolumeMounts = genEnforcerVolumes(enforcer) + } runtimeVolumes, runtimeVolumeMounts := genRuntimeVolumes(runtime, socket) vols := []corev1.Volume{} volMnts := []corev1.VolumeMount{} @@ -321,6 +325,18 @@ func (clusterWatcher *ClusterWatcher) AreAllNodesProcessed() bool { if !(len(nodes.Items) == processedNodes) { return false } + + // check if there's any node with securityfs/lsm exists + common.IfNodeWithSecurtiyFs = false + for _, node := range nodes.Items { + if val, ok := node.Labels[common.SecurityFsLabel]; ok { + switch val { + case "yes": + common.IfNodeWithSecurtiyFs = true + } + } + } + kaPodsList, err := clusterWatcher.Client.CoreV1().Pods(common.Namespace).List(context.Background(), metav1.ListOptions{ LabelSelector: "kubearmor-app=kubearmor", }) @@ -328,6 +344,49 @@ func (clusterWatcher *ClusterWatcher) AreAllNodesProcessed() bool { } +func (clusterWatcher *ClusterWatcher) deployControllerDeployment(deployment *appsv1.Deployment) error { + if common.IfNodeWithSecurtiyFs { + deployment.Spec.Template.Spec.NodeSelector = map[string]string{ + 
common.SecurityFsLabel: "yes", + } + deployment.Spec.Template.Spec.Containers = deployments.GetKubeArmorControllerDeployment(common.Namespace).Spec.Template.Spec.Containers + } else { + deployment.Spec.Template.Spec.NodeSelector = nil + for i, container := range deployment.Spec.Template.Spec.Containers { + if container.Name == "manager" { + for j, mount := range container.VolumeMounts { + if mount.MountPath == "/sys/kernel/security" { + deployment.Spec.Template.Spec.Containers[i].VolumeMounts = append(deployment.Spec.Template.Spec.Containers[i].VolumeMounts[:j], + deployment.Spec.Template.Spec.Containers[i].VolumeMounts[j+1:]...) + } + } + } + } + } + controller, err := clusterWatcher.Client.AppsV1().Deployments(common.Namespace).Get(context.Background(), deployment.Name, metav1.GetOptions{}) + if isNotfound(err) { + clusterWatcher.Log.Infof("Creating deployment %s", deployment.Name) + _, err = clusterWatcher.Client.AppsV1().Deployments(common.Namespace).Create(context.Background(), deployment, metav1.CreateOptions{}) + if err != nil { + clusterWatcher.Log.Warnf("Cannot create deployment %s, error=%s", deployment.Name, err.Error()) + return err + } + } else { + if (common.IfNodeWithSecurtiyFs && controller.Spec.Template.Spec.NodeSelector == nil) || + (!common.IfNodeWithSecurtiyFs && controller.Spec.Template.Spec.NodeSelector != nil) { + clusterWatcher.Log.Infof("Updating deployment %s", controller.Name) + controller.Spec.Template.Spec.NodeSelector = deployment.Spec.Template.Spec.NodeSelector + controller.Spec.Template.Spec.Containers = deployment.Spec.Template.Spec.Containers + _, err = clusterWatcher.Client.AppsV1().Deployments(common.Namespace).Update(context.Background(), controller, metav1.UpdateOptions{}) + if err != nil { + clusterWatcher.Log.Warnf("Cannot update deployment %s, error=%s", deployment.Name, err.Error()) + return err + } + } + } + return nil +} + func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { var caCert, tlsCrt, tlsKey 
*bytes.Buffer var kGenErr, err, installErr error @@ -397,7 +456,6 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { relayServer.Spec.Template.Spec.Containers[0].Image = common.GetApplicationImage(common.KubeArmorRelayName) relayServer.Spec.Template.Spec.Containers[0].ImagePullPolicy = corev1.PullPolicy(common.KubeArmorRelayImagePullPolicy) deploys := []*appsv1.Deployment{ - addOwnership(controller).(*appsv1.Deployment), addOwnership(relayServer).(*appsv1.Deployment), } @@ -537,6 +595,13 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { } } + areAllNodeProcessed := clusterWatcher.AreAllNodesProcessed() + + // deploy controller + if err := clusterWatcher.deployControllerDeployment(controller); err != nil { + installErr = err + } + //mutation webhook hook, err := clusterWatcher.Client.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(context.Background(), mutationhook.Name, metav1.GetOptions{}) if isNotfound(err) { @@ -564,7 +629,7 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { if installErr != nil { installErr = nil go clusterWatcher.UpdateCrdStatus(common.OperatorConfigCrd.Name, common.ERROR, common.INSTALLATION_ERR_MSG) - } else if clusterWatcher.AreAllNodesProcessed() { + } else if areAllNodeProcessed { go clusterWatcher.UpdateCrdStatus(common.OperatorConfigCrd.Name, common.RUNNING, common.RUNNING_MSG) } else { go clusterWatcher.UpdateCrdStatus(common.OperatorConfigCrd.Name, common.PENDING, common.PENDING_MSG)