From 410f8f2f0f822afc9288e1ee2fae30415938b0c3 Mon Sep 17 00:00:00 2001
From: Prateek
Date: Fri, 6 Dec 2024 03:48:05 +0530
Subject: [PATCH] implement NRI handler

In the operator's runtime detection, once containerd is found to be
available as the runtime, rather than selecting it immediately we go one
step further and probe for NRI availability using the same process/method.
If NRI is not found, containerd is set as the runtime. The implementation
reuses the logic that detects containerd when docker is the advertised
runtime.

Signed-off-by: Prateek
---
 KubeArmor/common/common.go               |  18 +
 KubeArmor/config/config.go               |  14 +
 KubeArmor/core/kubeArmor.go              |  37 +-
 KubeArmor/core/nriHandler.go             | 442 +++++++++++++++++++++++
 KubeArmor/go.mod                         |   1 +
 KubeArmor/go.sum                         |  10 +-
 pkg/KubeArmorOperator/common/defaults.go |   7 +
 pkg/KubeArmorOperator/runtime/runtime.go |  25 +-
 8 files changed, 543 insertions(+), 11 deletions(-)
 create mode 100644 KubeArmor/core/nriHandler.go

diff --git a/KubeArmor/common/common.go b/KubeArmor/common/common.go
index 8f06cc70d8..56bd0fc180 100644
--- a/KubeArmor/common/common.go
+++ b/KubeArmor/common/common.go
@@ -442,6 +442,24 @@ var ContainerRuntimeSocketMap = map[string][]string{
     },
 }
 
+// NRISocketMap Structure
+var NRISocketMap = map[string][]string{
+    "nri": {
+        "/var/run/nri/nri.sock",
+        "/run/nri/nri.sock",
+    },
+}
+
+// GetNRISocket Function
+func GetNRISocket(ContainerRuntime string) string {
+    for _, candidate := range NRISocketMap["nri"] {
+        if _, err := os.Stat(candidate); err == nil {
+            return candidate
+        }
+    }
+    return ""
+}
+
 // GetCRISocket Function
 func GetCRISocket(ContainerRuntime string) string {
     for _, k := range ContainerRuntimeSocketKeys {
diff --git a/KubeArmor/config/config.go b/KubeArmor/config/config.go
index 1a667f95a9..bcc97e1ae4 100644
--- a/KubeArmor/config/config.go
+++ b/KubeArmor/config/config.go
@@ -27,6 +27,8 @@ type KubearmorConfig struct {
     LogPath           string // Log file to use
     SELinuxProfileDir string // Directory to store SELinux profiles
     CRISocket         string // Container runtime to use
+    NRISocket         string // NRI socket to use
+    NRIIndex          string // NRI plugin index to use
 
     Visibility     string // Container visibility to use
     HostVisibility string // Host visibility to use
@@ -82,6 +84,8 @@ const (
     ConfigLogPath           string = "logPath"
     ConfigSELinuxProfileDir string = "seLinuxProfileDir"
     ConfigCRISocket         string = "criSocket"
+    ConfigNRISocket         string = "nriSocket"
+    ConfigNRIIndex          string = "nriIndex"
     ConfigVisibility        string = "visibility"
     ConfigHostVisibility    string = "hostVisibility"
     ConfigKubearmorPolicy   string = "enableKubeArmorPolicy"
@@ -122,6 +126,8 @@ func readCmdLineParams() {
     logStr := flag.String(ConfigLogPath, "none", "log file path, {path|stdout|none}")
     seLinuxProfileDirStr := flag.String(ConfigSELinuxProfileDir, "/tmp/kubearmor.selinux", "SELinux profile directory")
     criSocket := flag.String(ConfigCRISocket, "", "path to CRI socket (format: unix:///path/to/file.sock)")
+    nriSocket := flag.String(ConfigNRISocket, "", "path to NRI socket (format: /path/to/file.sock)")
+    nriIndex := flag.String(ConfigNRIIndex, "99", "NRI plugin index")
     visStr := flag.String(ConfigVisibility, "process,file,network,capabilities", "Container Visibility to use [process,file,network,capabilities,none]")
     hostVisStr := flag.String(ConfigHostVisibility, "default", "Host Visibility to use [process,file,network,capabilities,none] (default \"none\" for k8s, \"process,file,network,capabilities\" for VM)")
@@ -185,6 +191,8 @@ func readCmdLineParams() {
     viper.SetDefault(ConfigLogPath, *logStr)
     viper.SetDefault(ConfigSELinuxProfileDir, *seLinuxProfileDirStr)
     viper.SetDefault(ConfigCRISocket, *criSocket)
+    viper.SetDefault(ConfigNRISocket, *nriSocket)
+    viper.SetDefault(ConfigNRIIndex, *nriIndex)
     viper.SetDefault(ConfigVisibility, *visStr)
     viper.SetDefault(ConfigHostVisibility, *hostVisStr)
@@ -278,6 +286,12 @@ func LoadConfig() error {
         return fmt.Errorf("CRI socket must start with 'unix://' (%s is invalid)", GlobalCfg.CRISocket)
     }
 
+    GlobalCfg.NRISocket = os.Getenv("NRI_SOCKET")
+    if GlobalCfg.NRISocket == "" {
+        GlobalCfg.NRISocket = viper.GetString(ConfigNRISocket)
+    }
+    GlobalCfg.NRIIndex = viper.GetString(ConfigNRIIndex)
+
     GlobalCfg.Policy = viper.GetBool(ConfigKubearmorPolicy)
     GlobalCfg.HostPolicy = viper.GetBool(ConfigKubearmorHostPolicy)
     GlobalCfg.KVMAgent = viper.GetBool(ConfigKubearmorVM)
diff --git a/KubeArmor/core/kubeArmor.go b/KubeArmor/core/kubeArmor.go
index afecce924e..f8bae8ff40 100644
--- a/KubeArmor/core/kubeArmor.go
+++ b/KubeArmor/core/kubeArmor.go
@@ -597,7 +597,6 @@ func KubeArmor() {
     // Un-orchestrated workloads
     if !dm.K8sEnabled && cfg.GlobalCfg.Policy {
-
         // Check if cri socket set, if not then auto detect
         if cfg.GlobalCfg.CRISocket == "" {
             if kl.GetCRISocket("") == "" {
@@ -626,8 +625,14 @@
             // monitor docker events
             go dm.MonitorDockerEvents()
         } else if strings.Contains(cfg.GlobalCfg.CRISocket, "containerd") {
-            // monitor containerd events
-            go dm.MonitorContainerdEvents()
+            // ensure NRI monitoring is used only when containerd is present
+            if dm.checkNRIAvailability() {
+                // monitor NRI events
+                go dm.MonitorNRIEvents()
+            } else {
+                // monitor containerd events
+                go dm.MonitorContainerdEvents()
+            }
         } else if strings.Contains(cfg.GlobalCfg.CRISocket, "cri-o") {
             // monitor crio events
             go dm.MonitorCrioEvents()
@@ -642,8 +647,10 @@ func KubeArmor() {
     }
 
     if dm.K8sEnabled && cfg.GlobalCfg.Policy {
-        // check if the CRI socket set while executing kubearmor exists
-        if cfg.GlobalCfg.CRISocket != "" {
+        if dm.checkNRIAvailability() {
+            // monitor NRI events
+            go dm.MonitorNRIEvents()
+        } else if cfg.GlobalCfg.CRISocket != "" { // check if the CRI socket set while executing kubearmor exists
             trimmedSocket := strings.TrimPrefix(cfg.GlobalCfg.CRISocket, "unix://")
             if _, err := os.Stat(trimmedSocket); err != nil {
                 dm.Logger.Warnf("Error while looking for CRI socket file: %s", err.Error())
@@ -892,3 +899,23 @@ func KubeArmor() {
     // destroy the daemon
     dm.DestroyKubeArmorDaemon()
 }
+
+func (dm *KubeArmorDaemon) checkNRIAvailability() bool {
+    // Check if the NRI socket is set; if not, auto-detect it
+    if cfg.GlobalCfg.NRISocket == "" {
+        if kl.GetNRISocket("") != "" {
+            cfg.GlobalCfg.NRISocket = kl.GetNRISocket("")
+        } else {
+            dm.Logger.Warnf("Error while looking for NRI socket file")
+            return false
+        }
+    } else {
+        // NRI socket supplied by the user, check for existence
+        _, err := os.Stat(cfg.GlobalCfg.NRISocket)
+        if err != nil {
+            dm.Logger.Warnf("Error while looking for NRI socket file %s", err.Error())
+            return false
+        }
+    }
+    return true
+}
diff --git a/KubeArmor/core/nriHandler.go b/KubeArmor/core/nriHandler.go
new file mode 100644
index 0000000000..9931b7e972
--- /dev/null
+++ b/KubeArmor/core/nriHandler.go
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2021 Authors of KubeArmor
+
+package core
+
+import (
+    "context"
+    "fmt"
+    "os"
+    "path/filepath"
+    "strconv"
+    "strings"
+
+    "github.com/containerd/nri/pkg/api"
"github.com/containerd/nri/pkg/stub" + "github.com/kubearmor/KubeArmor/KubeArmor/common" + kl "github.com/kubearmor/KubeArmor/KubeArmor/common" + cfg "github.com/kubearmor/KubeArmor/KubeArmor/config" + kg "github.com/kubearmor/KubeArmor/KubeArmor/log" + tp "github.com/kubearmor/KubeArmor/KubeArmor/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NRI Handler +var NRI *NRIHandler + +type namespaceKey struct { + PidNS uint32 + MntNS uint32 +} + +// namespaceKeyFromContainer creates a namespaceKey from a container. +func namespaceKeyFromContainer(container tp.Container) namespaceKey { + return namespaceKey{ + PidNS: container.PidNS, + MntNS: container.MntNS, + } +} + +// NRIHandler connects to an NRI socket and informs on container +// creation/deletion events. +type NRIHandler struct { + // NRI plugin stub + stub stub.Stub + + // active containers + containers map[string]tp.Container + + dm *KubeArmorDaemon + + containersByNamespaces map[namespaceKey]string + + handleDeletedContainer func(tp.Container) + handleNewContainer func(tp.Container) +} + +// NewNRIHandler creates a new NRIHandler with the given event callbacks. +func (dm *KubeArmorDaemon) NewNRIHandler( + handleDeletedContainer func(tp.Container), + handleNewContainer func(tp.Container), +) *NRIHandler { + nri := &NRIHandler{dm: dm} + + opts := []stub.Option{ + stub.WithSocketPath(cfg.GlobalCfg.NRISocket), + stub.WithPluginIdx(cfg.GlobalCfg.NRIIndex), + } + + stub, err := stub.New(nri, opts...) + if err != nil { + kg.Errf("Failed to create NRI stub: %s", err.Error()) + return nil + } + + nri.containers = map[string]tp.Container{} + nri.containersByNamespaces = map[namespaceKey]string{} + nri.stub = stub + nri.handleDeletedContainer = handleDeletedContainer + nri.handleNewContainer = handleNewContainer + + return nri +} + +// Start initiates a configured NRI connection. +func (nh *NRIHandler) Start() { + go func() { + err := nh.stub.Run(context.Background()) + if err != nil { + kg.Errf("Failed to connect to NRI: %s", err.Error()) + } + }() +} + +// Stop closes the NRI connection. +func (nh *NRIHandler) Close() { + nh.stub.Stop() +} + +// Synchronize is an NRI callback which is called at the beginning of an NRI +// socket connection to inform on all existing containers. +func (nh *NRIHandler) Synchronize( + _ context.Context, + _ []*api.PodSandbox, + nriContainers []*api.Container, +) ([]*api.ContainerUpdate, error) { + for _, nriContainer := range nriContainers { + container := nh.nriToKubeArmorContainer(nriContainer) + container = nh.mergeContainer(container, false) + + // Overlapping namespace IDs between containers should be impossible + // here + namespaceKey := namespaceKeyFromContainer(container) + nh.containersByNamespaces[namespaceKey] = container.ContainerID + + nh.handleNewContainer(container) + } + + return nil, nil +} + +// StartContainer is an NRI callback which is called after a container has +// started. +// +// Unfortunately we can't use the CreateContainer or PostCreateContainer NRI +// callbacks because they are called without a PID value, which is required in +// order to get the PID and mount namespaces of the container. This means that +// there is a short period of time between a container starting and us enforcing +// it. 
+//
+// If StartContainer detects a container namespace ID overlap with a previous
+// container (since Linux can reuse namespace IDs), it will override the old
+// policy correctly, but any actions runc took to set up this container and
+// start it will be logged/enforced as if they were the old container's
+// actions. This should be exceedingly rare, but there's no way using just
+// NRI that we can entirely avoid this scenario.
+func (nh *NRIHandler) StartContainer(
+    _ context.Context,
+    _ *api.PodSandbox,
+    nriContainer *api.Container,
+) error {
+    container := nh.nriToKubeArmorContainer(nriContainer)
+    container = nh.mergeContainer(container, false)
+
+    namespaceKey := namespaceKeyFromContainer(container)
+
+    // It's technically possible for a container to crash and a new one to be
+    // started, all before we receive the StopContainer event. And because
+    // Linux can reuse namespace IDs, it's possible for the enforcement
+    // configuration to get confused and messed up, so if namespace IDs ever
+    // overlap, we assume the previous container using those namespaces has
+    // already exited.
+    if oldContainerID, ok := nh.containersByNamespaces[namespaceKey]; ok {
+        delete(nh.containers, container.ContainerID)
+
+        nh.handleDeletedContainer(nh.containers[oldContainerID])
+    }
+
+    nh.containersByNamespaces[namespaceKey] = container.ContainerID
+
+    nh.handleNewContainer(container)
+
+    return nil
+}
+
+// StopContainer is an NRI callback which is called before a container
+// receives the signal to stop.
+//
+// StopContainer is called synchronously before a termination signal is sent
+// to a container, so we can be sure that we stop enforcing before the
+// container shuts down, at least in most cases. This means that if a new
+// container reuses Linux namespace IDs from a previous container, so long as
+// that previous container didn't crash unexpectedly, we can be sure that we
+// won't accidentally enforce the new container with the old container's
+// policy.
+//
+// The tradeoff here is that once a container receives its termination signal,
+// KubeArmor is no longer enforcing anything on it while it shuts down.
+func (nh *NRIHandler) StopContainer(
+    _ context.Context,
+    _ *api.PodSandbox,
+    nriContainer *api.Container,
+) ([]*api.ContainerUpdate, error) {
+    container := nh.nriToKubeArmorContainer(nriContainer)
+    container = nh.mergeContainer(container, true)
+
+    // Only handle the container deleted event if it wasn't already 'deleted'
+    // by the StartContainer event (due to a Linux namespace ID collision).
+    if _, ok := nh.containersByNamespaces[namespaceKeyFromContainer(container)]; ok {
+        delete(nh.containers, container.ContainerID)
+
+        nh.handleDeletedContainer(container)
+    }
+
+    return nil, nil
+}
+
+// RemoveContainer is an NRI callback which is called after a container has
+// exited.
+//
+// In case StopContainer isn't called, we hook into RemoveContainer to ensure
+// that we stop enforcing a container after it has exited. For example, the
+// NRI API doesn't guarantee that StopContainer will be called if a container
+// crashed unexpectedly.
+func (nh *NRIHandler) RemoveContainer(
+    _ context.Context,
+    _ *api.PodSandbox,
+    nriContainer *api.Container,
+) ([]*api.ContainerUpdate, error) {
+    container := nh.nriToKubeArmorContainer(nriContainer)
+    container = nh.mergeContainer(container, true)
+
+    // Only handle the container deleted event if it wasn't already 'deleted'
+    // by the StartContainer event (due to a Linux namespace ID collision) or
+    // StopContainer event.
+    if _, ok := nh.containersByNamespaces[namespaceKeyFromContainer(container)]; ok {
+        delete(nh.containers, container.ContainerID)
+
+        nh.handleDeletedContainer(container)
+    }
+
+    return nil, nil
+}
+
+// mergeContainer updates the container with the container's previously-stored
+// namespace IDs, if any, also storing namespaceIDs for future reference.
+func (nh *NRIHandler) mergeContainer(container tp.Container, removing bool) tp.Container {
+    if existing, ok := nh.containers[container.ContainerID]; ok {
+        if existing.PidNS != 0 {
+            container.PidNS = existing.PidNS
+        }
+
+        if existing.MntNS != 0 {
+            container.MntNS = existing.MntNS
+        }
+
+        nh.containers[container.ContainerID] = container
+    } else if !removing {
+        nh.containers[container.ContainerID] = container
+    }
+
+    return container
+}
+
+// nriToKubeArmorContainer converts an NRI container to a KubeArmor container.
+func (nh *NRIHandler) nriToKubeArmorContainer(nriContainer *api.Container) tp.Container {
+    container := tp.Container{}
+
+    container.ContainerID = nriContainer.Id
+    container.ContainerName = nriContainer.Name
+
+    container.NamespaceName = "Unknown"
+    container.EndPointName = "Unknown"
+
+    if _, ok := nriContainer.Labels["io.kubernetes.pod.namespace"]; ok {
+        container.NamespaceName = nriContainer.Labels["io.kubernetes.pod.namespace"] // Pod namespace
+
+        if _, ok := nriContainer.Labels["io.kubernetes.pod.name"]; ok {
+            container.EndPointName = nriContainer.Labels["io.kubernetes.pod.name"] // Pod name
+        }
+    }
+
+    var podName string
+    var podNamespace string
+
+    if name, ok := nriContainer.Labels["io.kubernetes.pod.name"]; ok {
+        podName = name
+    }
+    if namespace, ok := nriContainer.Labels["io.kubernetes.pod.namespace"]; ok {
+        podNamespace = namespace
+    }
+
+    if nh.dm.K8sEnabled {
+        pod, err := K8s.K8sClient.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+        if err != nil {
+            kg.Warnf("failed to fetch Pod: %v\n", err)
+        } else if appArmorProfile, ok := pod.Annotations["container.apparmor.security.beta.kubernetes.io/"+nriContainer.Name]; ok {
+            profile := strings.Split(appArmorProfile, "/")
+            if len(profile) > 1 {
+                container.AppArmorProfile = profile[1]
+            }
+        }
+    } else {
+        container.AppArmorProfile = "kubearmor_" + container.ContainerName
+    }
+
+    // Read PID and mount namespaces from container root PID
+    if nriContainer.Pid != 0 {
+        pid := strconv.Itoa(int(nriContainer.Pid))
+
+        if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil {
+            if _, err := fmt.Sscanf(data, "pid:[%d]", &container.PidNS); err != nil {
+                kg.Warnf("Unable to get PidNS (%s, %d, %s)", nriContainer.Id, nriContainer.Pid, err.Error())
+            }
+        }
+
+        if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil {
+            if _, err := fmt.Sscanf(data, "mnt:[%d]", &container.MntNS); err != nil {
+                kg.Warnf("Unable to get MntNS (%s, %d, %s)", nriContainer.Id, nriContainer.Pid, err.Error())
+            }
+        }
+    }
+
+    return container
+}
+
+// MonitorNRIEvents monitors NRI events.
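+// It wires up callbacks that register newly-started containers with the
+// daemon (namespace maps, enforcer registration, endpoint association) and
+// clean up after deleted ones, then connects the NRI plugin stub; if the
+// stub cannot be created, it returns without monitoring anything.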
+func (dm *KubeArmorDaemon) MonitorNRIEvents() {
+    dm.WgDaemon.Add(1)
+    defer dm.WgDaemon.Done()
+
+    handleNewContainer := func(container tp.Container) {
+        endpoint := tp.EndPoint{}
+
+        dm.ContainersLock.Lock()
+
+        if len(dm.OwnerInfo) > 0 {
+            if podOwnerInfo, ok := dm.OwnerInfo[container.EndPointName]; ok {
+                container.Owner = podOwnerInfo
+            }
+        }
+
+        if _, ok := dm.Containers[container.ContainerID]; !ok {
+            dm.Containers[container.ContainerID] = container
+            dm.ContainersLock.Unlock()
+        } else if dm.Containers[container.ContainerID].PidNS == 0 && dm.Containers[container.ContainerID].MntNS == 0 {
+            // this entry was updated by kubernetes before the NRI handler detected it
+            // thus, we here use the info given by kubernetes instead of the info given by NRI
+
+            container.NamespaceName = dm.Containers[container.ContainerID].NamespaceName
+            container.EndPointName = dm.Containers[container.ContainerID].EndPointName
+            container.Labels = dm.Containers[container.ContainerID].Labels
+
+            container.ContainerName = dm.Containers[container.ContainerID].ContainerName
+            container.ContainerImage = dm.Containers[container.ContainerID].ContainerImage
+
+            container.PolicyEnabled = dm.Containers[container.ContainerID].PolicyEnabled
+
+            container.ProcessVisibilityEnabled = dm.Containers[container.ContainerID].ProcessVisibilityEnabled
+            container.FileVisibilityEnabled = dm.Containers[container.ContainerID].FileVisibilityEnabled
+            container.NetworkVisibilityEnabled = dm.Containers[container.ContainerID].NetworkVisibilityEnabled
+            container.CapabilitiesVisibilityEnabled = dm.Containers[container.ContainerID].CapabilitiesVisibilityEnabled
+
+            dm.Containers[container.ContainerID] = container
+            dm.ContainersLock.Unlock()
+
+            dm.EndPointsLock.Lock()
+            for idx, endPoint := range dm.EndPoints {
+                if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) {
+                    // update containers
+                    if !kl.ContainsElement(endPoint.Containers, container.ContainerID) { // does not make sense but need to verify
+                        dm.EndPoints[idx].Containers = append(dm.EndPoints[idx].Containers, container.ContainerID)
+                    }
+
+                    endpoint = dm.EndPoints[idx]
+
+                    break
+                }
+            }
+            dm.EndPointsLock.Unlock()
+        } else {
+            dm.ContainersLock.Unlock()
+            return
+        }
+
+        if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy {
+            // for throttling
+            dm.SystemMonitor.Logger.ContainerNsKey[container.ContainerID] = common.OuterKey{
+                MntNs: container.MntNS,
+                PidNs: container.PidNS,
+            }
+            // update NsMap
+            dm.SystemMonitor.AddContainerIDToNsMap(container.ContainerID, container.NamespaceName, container.PidNS, container.MntNS)
+            dm.RuntimeEnforcer.RegisterContainer(container.ContainerID, container.PidNS, container.MntNS)
+            if dm.Presets != nil {
+                dm.Presets.RegisterContainer(container.ContainerID, container.PidNS, container.MntNS)
+            }
+
+            if len(endpoint.SecurityPolicies) > 0 { // struct can be empty or no policies registered for the endpoint yet
+                dm.Logger.UpdateSecurityPolicies("ADDED", endpoint)
+                if dm.RuntimeEnforcer != nil && endpoint.PolicyEnabled == tp.KubeArmorPolicyEnabled {
+                    // enforce security policies
+                    dm.RuntimeEnforcer.UpdateSecurityPolicies(endpoint)
+                }
+                if dm.Presets != nil && endpoint.PolicyEnabled == tp.KubeArmorPolicyEnabled {
+                    // enforce preset rules
+                    dm.Presets.UpdateSecurityPolicies(endpoint)
+                }
+            }
+        }
+
+        if !dm.K8sEnabled {
+            dm.ContainersLock.Lock()
+            dm.EndPointsLock.Lock()
+            dm.MatchandUpdateContainerSecurityPolicies(container.ContainerID)
+            dm.EndPointsLock.Unlock()
+            dm.ContainersLock.Unlock()
+        }
+
+        dm.Logger.Printf("Detected a container (added/%.12s/pidns=%d/mntns=%d)", container.ContainerID, container.PidNS, container.MntNS)
+    }
+
+    handleDeletedContainer := func(container tp.Container) {
+        dm.ContainersLock.Lock()
+        _, ok := dm.Containers[container.ContainerID]
+        if !ok {
+            dm.ContainersLock.Unlock()
+            return
+        }
+        if !dm.K8sEnabled {
+            dm.EndPointsLock.Lock()
+            dm.MatchandRemoveContainerFromEndpoint(container.ContainerID)
+            dm.EndPointsLock.Unlock()
+        }
+        delete(dm.Containers, container.ContainerID)
+        dm.ContainersLock.Unlock()
+
+        if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy {
+            outkey := dm.SystemMonitor.Logger.ContainerNsKey[container.ContainerID]
+            dm.Logger.DeleteAlertMapKey(outkey)
+            delete(dm.SystemMonitor.Logger.ContainerNsKey, container.ContainerID)
+            // update NsMap
+            dm.SystemMonitor.DeleteContainerIDFromNsMap(container.ContainerID, container.NamespaceName, container.PidNS, container.MntNS)
+            dm.RuntimeEnforcer.UnregisterContainer(container.ContainerID)
+            if dm.Presets != nil {
+                dm.Presets.UnregisterContainer(container.ContainerID)
+            }
+        }
+
+        dm.Logger.Printf("Detected a container (removed/%.12s/pidns=%d/mntns=%d)", container.ContainerID, container.PidNS, container.MntNS)
+    }
+
+    NRI = dm.NewNRIHandler(handleDeletedContainer, handleNewContainer)
+
+    // check if NRI exists
+    if NRI == nil {
+        return
+    }
+
+    NRI.Start()
+
+    dm.Logger.Print("Started to monitor NRI events")
+}
diff --git a/KubeArmor/go.mod b/KubeArmor/go.mod
index cbdb486cb7..b7a035d83a 100644
--- a/KubeArmor/go.mod
+++ b/KubeArmor/go.mod
@@ -29,6 +29,7 @@ require (
 	github.com/cilium/ebpf v0.12.3
 	github.com/containerd/containerd/api v1.8.0
 	github.com/containerd/containerd/v2 v2.0.0
+	github.com/containerd/nri v0.8.0
 	github.com/containerd/typeurl/v2 v2.2.2
 	github.com/docker/docker v27.1.1+incompatible
 	github.com/fsnotify/fsnotify v1.7.0
diff --git a/KubeArmor/go.sum b/KubeArmor/go.sum
index 67469c1078..e3dcba32dd 100644
--- a/KubeArmor/go.sum
+++ b/KubeArmor/go.sum
@@ -52,6 +52,8 @@ github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY
 github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/nri v0.8.0 h1:n1S753B9lX8RFrHYeSgwVvS1yaUcHjxbB+f+xzEncRI=
+github.com/containerd/nri v0.8.0/go.mod h1:uSkgBrCdEtAiEz4vnrq8gmAC4EnVAM5Klt0OuK5rZYQ=
 github.com/containerd/platforms v1.0.0-rc.0 h1:GuHWSKgVVO3POn6nRBB4sH63uPOLa87yuuhsGLWaXAA=
 github.com/containerd/platforms v1.0.0-rc.0/go.mod h1:T1XAzzOdYs3it7l073MNXyxRwQofJfqwi/8cRjufIk4=
 github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y=
@@ -235,10 +237,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
-github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
-github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0=
+github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA=
+github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os=
+github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
diff --git a/pkg/KubeArmorOperator/common/defaults.go b/pkg/KubeArmorOperator/common/defaults.go
index ef59e6dbd9..2fa47f2734 100644
--- a/pkg/KubeArmorOperator/common/defaults.go
+++ b/pkg/KubeArmorOperator/common/defaults.go
@@ -213,6 +213,8 @@ var ContainerRuntimeSocketMap = map[string][]string{
 		"/run/docker.sock",
 	},
 	"containerd": {
+		"/run/nri/nri.sock",
+		"/var/run/nri/nri.sock",
 		"/var/snap/microk8s/common/run/containerd.sock",
 		"/run/k0s/containerd.sock",
 		"/run/k3s/containerd/containerd.sock",
@@ -224,6 +226,10 @@ var ContainerRuntimeSocketMap = map[string][]string{
 		"/var/run/crio/crio.sock",
 		"/run/crio/crio.sock",
 	},
+	"nri": {
+		"/var/run/nri/nri.sock",
+		"/run/nri/nri.sock",
+	},
 }
 
 var HostPathDirectory = corev1.HostPathDirectory
@@ -258,6 +264,7 @@ var RuntimeSocketLocation = map[string]string{
 	"docker":     "/var/run/docker.sock",
 	"containerd": "/var/run/containerd/containerd.sock",
 	"cri-o":      "/var/run/crio/crio.sock",
+	"nri":        "/var/run/nri/nri.sock",
 }
 
 func ShortSHA(s string) string {
diff --git a/pkg/KubeArmorOperator/runtime/runtime.go b/pkg/KubeArmorOperator/runtime/runtime.go
index 21277cc38d..16fe7386e1 100644
--- a/pkg/KubeArmorOperator/runtime/runtime.go
+++ b/pkg/KubeArmorOperator/runtime/runtime.go
@@ -5,19 +5,40 @@ package runtime
 
 import (
 	"os"
+	"path/filepath"
 	"strings"
 
+	"github.com/kubearmor/KubeArmor/KubeArmor/log"
 	"github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/common"
 	"go.uber.org/zap"
 )
 
+func DetectNRI(pathPrefix, runtime string) (string, string, error) {
+	var err error
+	for _, path := range common.ContainerRuntimeSocketMap[runtime] {
+		if _, err = os.Stat(filepath.Clean(pathPrefix + path)); err == nil || os.IsPermission(err) {
+			if strings.Contains(path, "nri") {
+				return "nri", path, nil
+			}
+			return runtime, path, nil
+		} else {
+			log.Warnf("%s", err)
+		}
+	}
+	return "NA", "NA", err
+}
+
 func DetectRuntimeViaMap(pathPrefix string, k8sRuntime string, log zap.SugaredLogger) (string, string) {
 	log.Infof("Checking for %s socket\n", k8sRuntime)
 	if k8sRuntime != "" {
 		for _, path := range common.ContainerRuntimeSocketMap[k8sRuntime] {
 			if _, err := os.Stat(pathPrefix + path); err == nil || os.IsPermission(err) {
-				if k8sRuntime == "docker" && strings.Contains(path, "containerd") {
-					return "containerd", path
+				if (k8sRuntime == "docker" && strings.Contains(path, "containerd")) || k8sRuntime == "containerd" {
+					if k8sRuntime, path, err = DetectNRI(pathPrefix, "containerd"); err == nil {
+						return k8sRuntime, path
+					} else {
+						log.Warnf("%s", err)
+					}
+				}
 				return k8sRuntime, path
 			} else {
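
Reviewer note (not part of the commit): for anyone unfamiliar with NRI, a
minimal standalone plugin built on the same containerd/nri stub API this
patch uses (stub.New, stub.WithSocketPath, stub.WithPluginIdx, and the
Synchronize/StartContainer callbacks) looks roughly like the sketch below.
The socket path and plugin index mirror the defaults introduced above; the
plugin body itself is illustrative only.

	package main

	import (
		"context"
		"fmt"

		"github.com/containerd/nri/pkg/api"
		"github.com/containerd/nri/pkg/stub"
	)

	type plugin struct{}

	// Synchronize receives all pre-existing containers when the NRI
	// connection is established, mirroring NRIHandler.Synchronize.
	func (p *plugin) Synchronize(_ context.Context, _ []*api.PodSandbox, containers []*api.Container) ([]*api.ContainerUpdate, error) {
		for _, c := range containers {
			fmt.Printf("existing container: %s (pid %d)\n", c.Id, c.Pid)
		}
		return nil, nil
	}

	// StartContainer fires once the container has a PID, which is why
	// nriHandler.go hooks it instead of CreateContainer.
	func (p *plugin) StartContainer(_ context.Context, _ *api.PodSandbox, c *api.Container) error {
		fmt.Printf("started container: %s (pid %d)\n", c.Id, c.Pid)
		return nil
	}

	func main() {
		s, err := stub.New(&plugin{},
			stub.WithSocketPath("/var/run/nri/nri.sock"), // first candidate in NRISocketMap
			stub.WithPluginIdx("99"),                     // default nriIndex from config.go
		)
		if err != nil {
			fmt.Println("failed to create NRI stub:", err)
			return
		}
		if err := s.Run(context.Background()); err != nil {
			fmt.Println("NRI connection failed:", err)
		}
	}

Running this against a runtime with NRI enabled (containerd enables NRI by
default starting with 2.0; on 1.7 it must be switched on in the config)
prints every container the runtime reports, which is the same event stream
the new MonitorNRIEvents consumes.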