diff --git a/.dockerignore b/.dockerignore
index a9b9d8c8..e629461d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,4 +1,3 @@
 testdata
-cmd
 node_modules
-dist
\ No newline at end of file
+dist
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index fb7795a7..bc9ec0d5 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -41,29 +41,15 @@ jobs:
           key: ${{ runner.os }}-build-${{ hashFiles('**/go.sum') }}
           restore-keys: ${{ runner.os }}-build-

-      - name: Build agent go binary amd64
-        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-kvisor-amd64 ./cmd/agent
-        env:
-          GOOS: linux
-          GOARCH: amd64
-          CGO_ENABLED: 0
-
-      - name: Build imgcollector go binary amd64
-        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-imgcollector-amd64 ./cmd/imgcollector
+      - name: Build kvisor go binary amd64
+        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-kvisor-amd64 ./cmd/kvisor
         env:
           GOOS: linux
           GOARCH: amd64
           CGO_ENABLED: 0

       - name: Build agent go binary arm64
-        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-kvisor-arm64 ./cmd/agent
-        env:
-          GOOS: linux
-          GOARCH: arm64
-          CGO_ENABLED: 0
-
-      - name: Build imgcollector go binary arm64
-        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-imgcollector-arm64 ./cmd/imgcollector
+        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-kvisor-arm64 ./cmd/kvisor
         env:
           GOOS: linux
           GOARCH: arm64
@@ -106,45 +92,26 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}

-      - name: Build and push pr (agent)
+      - name: Build and push pr (kvisor)
         if: ${{ github.event_name == 'pull_request' }}
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./Dockerfile.agent
+          file: ./Dockerfile
           platforms: linux/arm64,linux/amd64
           push: ${{ github.event_name == 'pull_request' }}
           tags: ghcr.io/castai/kvisor/kvisor:${{ github.sha }}

-      - name: Build and push pr (imgcollector)
-        if: ${{ github.event_name == 'pull_request' }}
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./Dockerfile.imgcollector
-          platforms: linux/arm64,linux/amd64
-          push: ${{ github.event_name == 'pull_request' }}
-          tags: ghcr.io/castai/kvisor/kvisor-imgcollector:${{ github.sha }}
-
-      - name: Build and push main (agent)
+      - name: Build and push main (kvisor)
         if: ${{ github.event_name != 'pull_request' && github.event_name != 'release' }}
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./Dockerfile.agent
+          file: ./Dockerfile
           platforms: linux/arm64,linux/amd64
           push: ${{ github.event_name != 'pull_request' }}
           tags: us-docker.pkg.dev/castai-hub/library/kvisor:${{ github.sha }}

-      - name: Build and push main (imgcollector)
-        if: ${{ github.event_name != 'pull_request' && github.event_name != 'release' }}
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./Dockerfile.imgcollector
-          platforms: linux/arm64,linux/amd64
-          push: ${{ github.event_name != 'pull_request' }}
-          tags: us-docker.pkg.dev/castai-hub/library/kvisor-imgcollector:${{ github.sha }}

   e2e:
     name: E2E
     runs-on: ubuntu-20.04
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 01ac324d..9a7d1279 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -36,29 +36,15 @@ jobs:
       - name: Get release tag
         run: echo "RELEASE_TAG=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV

-      - name: Build agent go binary amd64
-        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-kvisor-amd64 ./cmd/agent
+      - name: Build kvisor go binary amd64
+        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-kvisor-amd64 ./cmd/kvisor
         env:
           GOOS: linux
           GOARCH: amd64
           CGO_ENABLED: 0

-      - name: Build imgcollector go binary amd64
-        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-imgcollector-amd64 ./cmd/imgcollector
-        env:
-          GOOS: linux
-          GOARCH: amd64
-          CGO_ENABLED: 0
-
-      - name: Build agent go binary arm64
-        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-kvisor-arm64 ./cmd/agent
-        env:
-          GOOS: linux
-          GOARCH: arm64
-          CGO_ENABLED: 0
-
-      - name: Build imgcollector go binary arm64
-        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-imgcollector-arm64 ./cmd/imgcollector
+      - name: Build kvisor go binary arm64
+        run: go build -ldflags "-s -w -X main.GitCommit=$GITHUB_SHA -X main.GitRef=$GITHUB_REF -X main.Version=${RELEASE_TAG:-commit-$GITHUB_SHA}" -o bin/castai-kvisor-arm64 ./cmd/kvisor
         env:
           GOOS: linux
           GOARCH: arm64
@@ -77,28 +63,17 @@ jobs:
           username: _json_key
           password: ${{ secrets.ARTIFACT_BUILDER_JSON_KEY }}

-      - name: Build and push release (agent)
+      - name: Build and push release (kvisor)
         uses: docker/build-push-action@v2
         with:
           context: .
           push: true
-          file: ./Dockerfile.agent
+          file: ./Dockerfile
           platforms: linux/arm64,linux/amd64
           tags: |
             us-docker.pkg.dev/castai-hub/library/kvisor:${{ env.RELEASE_TAG }}
             us-docker.pkg.dev/castai-hub/library/kvisor:latest

-      - name: Build and push release (imgcollector)
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          push: true
-          file: ./Dockerfile.imgcollector
-          platforms: linux/arm64,linux/amd64
-          tags: |
-            us-docker.pkg.dev/castai-hub/library/kvisor-imgcollector:${{ env.RELEASE_TAG }}
-            us-docker.pkg.dev/castai-hub/library/kvisor-imgcollector:latest
-
   release_chart:
     name: Release Helm Chart
     runs-on: ubuntu-20.04
diff --git a/.golangci.yaml b/.golangci.yaml
index 11b41b64..6aec701c 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -20,3 +20,4 @@ run:
     - .github
     - charts
     - examples
+    - cmd/kvisor/kubebench # TODO: Fix kubebench issues
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..e6af9e15
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,9 @@
+FROM alpine:3.18.5
+
+# Needed for kube-bench.
+RUN apk --no-cache add procps
+
+ARG TARGETARCH
+COPY ./bin/castai-kvisor-$TARGETARCH /usr/local/bin/castai-kvisor
+COPY ./cmd/kvisor/kubebench/kubebench-rules /etc/kubebench-rules
+ENTRYPOINT ["/usr/local/bin/castai-kvisor"]
diff --git a/Dockerfile.agent b/Dockerfile.agent
deleted file mode 100644
index 71f294a5..00000000
--- a/Dockerfile.agent
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM gcr.io/distroless/static-debian11:nonroot
-ARG TARGETARCH
-COPY ./bin/castai-kvisor-$TARGETARCH /usr/local/bin/castai-kvisor
-CMD ["/usr/local/bin/castai-kvisor"]
diff --git a/Dockerfile.imgcollector b/Dockerfile.imgcollector
deleted file mode 100644
index 40beafe3..00000000
--- a/Dockerfile.imgcollector
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM gcr.io/distroless/static-debian11:nonroot
-ARG TARGETARCH
-COPY ./bin/castai-imgcollector-$TARGETARCH /usr/local/bin/castai-imgcollector
-CMD ["/usr/local/bin/castai-imgcollector"]
diff --git a/Dockerfile.imgcollector.tilt b/Dockerfile.imgcollector.tilt
deleted file mode 100644
index cae15e41..00000000
--- a/Dockerfile.imgcollector.tilt
+++ /dev/null
@@ -1,5 +0,0 @@
-FROM alpine:3.17
-
-COPY ./bin/castai-imgcollector /usr/local/bin/castai-imgcollector
-
-CMD ["/usr/local/bin/castai-imgcollector"]
diff --git a/Dockerfile.tilt b/Dockerfile.tilt
index 4dca8c3f..acd169e9 100644
--- a/Dockerfile.tilt
+++ b/Dockerfile.tilt
@@ -11,5 +11,5 @@ RUN /busybox --install
 FROM base-with-shell

 COPY ./bin/castai-kvisor /usr/local/bin/castai-kvisor
-CMD ["/usr/local/bin/castai-kvisor"]
+ENTRYPOINT ["/usr/local/bin/castai-kvisor"]
diff --git a/charts/castai-kvisor/templates/deployment.yaml b/charts/castai-kvisor/templates/deployment.yaml
index b2f18d0a..673e5280 100644
--- a/charts/castai-kvisor/templates/deployment.yaml
+++ b/charts/castai-kvisor/templates/deployment.yaml
@@ -68,9 +68,11 @@ spec:
       securityContext:
       {{- toYaml .Values.securityContext | nindent 8 }}
       containers:
-        - name: kvisor
+        - name: kvisor # It's important to keep this name as-is, since we search for the image name in the kube controller.
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "agent" env: - name: POD_IP valueFrom: diff --git a/charts/castai-kvisor/values.yaml b/charts/castai-kvisor/values.yaml index 1806f829..ff083866 100644 --- a/charts/castai-kvisor/values.yaml +++ b/charts/castai-kvisor/values.yaml @@ -69,6 +69,7 @@ config: | rateLimit: burst: 150 qps: 25 + provider: "" # Kubernetes provider (aks, eks, gke) log: level: "debug" deltaSyncInterval: "15s" @@ -79,7 +80,6 @@ config: | enabled: true scanInterval: "30s" image: - name: "ghcr.io/castai/kvisor/kube-bench:v0.8.0" pullPolicy: IfNotPresent imageScan: enabled: true @@ -89,7 +89,6 @@ config: | serviceAccountName: "{{ (.Values.imageScanServiceAccount | default dict).name }}" apiUrl: "http://kvisor.{{ .Release.Namespace }}.svc.cluster.local.:6060" image: - name: "{{ .Values.image.repository }}-imgcollector:{{ .Values.image.tag | default .Chart.AppVersion }}" pullPolicy: IfNotPresent {{ if .Values.imageScanSecret }} pullSecret: "{{ .Values.imageScanSecret }}" diff --git a/cmd/agent/main.go b/cmd/kvisor/agent/agent.go similarity index 88% rename from cmd/agent/main.go rename to cmd/kvisor/agent/agent.go index 195567c7..fb6ab59e 100644 --- a/cmd/agent/main.go +++ b/cmd/kvisor/agent/agent.go @@ -1,4 +1,4 @@ -package main +package agent import ( "context" @@ -21,6 +21,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/samber/lo" "github.com/sirupsen/logrus" + "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -56,64 +57,63 @@ import ( "github.com/castai/kvisor/version" ) -// These should be set via `go build` during a release. 
-var ( - GitCommit = "undefined" - GitRef = "no-ref" - Version = "local" -) - -var ( - configPath = flag.String("config", "/etc/castai/config/config.yaml", "Config file path") -) - -func main() { - flag.Parse() - - logger := logrus.New() - cfg, err := config.Load(*configPath) - if err != nil { - logger.Fatal(err) - } - lvl, _ := logrus.ParseLevel(cfg.Log.Level) - logger.SetLevel(lvl) +func NewCommand(version, gitCommit, gitRef string) *cobra.Command { + var configPath string + cmd := &cobra.Command{ + Use: "agent", + Short: "Run kvisor agent server", + Run: func(cmd *cobra.Command, args []string) { + flag.Parse() - binVersion := config.SecurityAgentVersion{ - GitCommit: GitCommit, - GitRef: GitRef, - Version: Version, - } + logger := logrus.New() + cfg, err := config.Load(configPath) + if err != nil { + logger.Fatal(err) + } + lvl, _ := logrus.ParseLevel(cfg.Log.Level) + logger.SetLevel(lvl) - client := castai.NewClient( - cfg.API.URL, cfg.API.Key, - logger, - cfg.API.ClusterID, - cfg.PolicyEnforcement.Enabled, - "castai-kvisor", - binVersion, - ) + binVersion := config.SecurityAgentVersion{ + GitCommit: gitCommit, + GitRef: gitRef, + Version: version, + } - log := logrus.WithFields(logrus.Fields{}) - e := agentlog.NewExporter(logger, client, []logrus.Level{ - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - logrus.InfoLevel, - logrus.WarnLevel, - }) + client := castai.NewClient( + cfg.API.URL, cfg.API.Key, + logger, + cfg.API.ClusterID, + cfg.PolicyEnforcement.Enabled, + "castai-kvisor", + binVersion, + ) + + log := logrus.WithFields(logrus.Fields{}) + e := agentlog.NewExporter(logger, client, []logrus.Level{ + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + logrus.InfoLevel, + logrus.WarnLevel, + }) - logger.AddHook(e) - logrus.RegisterExitHandler(e.Wait) + logger.AddHook(e) + logrus.RegisterExitHandler(e.Wait) - ctx := signals.SetupSignalHandler() - if err := run(ctx, logger, client, cfg, binVersion); err != nil && !errors.Is(err, context.Canceled) { - logErr := &logContextErr{} - if errors.As(err, &logErr) { - log = logger.WithFields(logErr.fields) - } - log.Fatalf("castai-kvisor failed: %v", err) + ctx := signals.SetupSignalHandler() + if err := run(ctx, logger, client, cfg, binVersion); err != nil && !errors.Is(err, context.Canceled) { + logErr := &logContextErr{} + if errors.As(err, &logErr) { + log = logger.WithFields(logErr.fields) + } + log.Fatalf("castai-kvisor failed: %v", err) + } + log.Info("castai-kvisor stopped") + }, } - log.Info("castai-kvisor stopped") + cmd.PersistentFlags().StringVar(&configPath, "config", "/etc/castai/config/config.yaml", "Config file path") + + return cmd } func run(ctx context.Context, logger logrus.FieldLogger, castaiClient castai.Client, cfg config.Config, binVersion config.SecurityAgentVersion) (reterr error) { @@ -160,7 +160,7 @@ func run(ctx context.Context, logger logrus.FieldLogger, castaiClient castai.Cli snapshotProvider := delta.NewSnapshotProvider() informersFactory := informers.NewSharedInformerFactory(clientSet, 0) - kubeCtrl := kube.NewController(log, informersFactory, k8sVersion) + kubeCtrl := kube.NewController(log, informersFactory, k8sVersion, cfg.PodNamespace) deltaCtrl := delta.NewController( log, @@ -209,6 +209,7 @@ func run(ctx context.Context, logger logrus.FieldLogger, castaiClient castai.Cli cfg.KubeBench.ScanInterval, castaiClient, podLogReader, + kubeCtrl, scannedNodes, ) kubeCtrl.AddSubscribers(kubeBenchCtrl) diff --git a/cmd/agent/main_test.go b/cmd/kvisor/agent/agent_test.go similarity index 
98% rename from cmd/agent/main_test.go rename to cmd/kvisor/agent/agent_test.go index cea86051..7cf81662 100644 --- a/cmd/agent/main_test.go +++ b/cmd/kvisor/agent/agent_test.go @@ -1,4 +1,4 @@ -package main +package agent import ( "errors" diff --git a/cmd/imgcollector/collector/collector.go b/cmd/kvisor/imgcollector/collector/collector.go similarity index 99% rename from cmd/imgcollector/collector/collector.go rename to cmd/kvisor/imgcollector/collector/collector.go index 9192c52a..d4ab236d 100644 --- a/cmd/imgcollector/collector/collector.go +++ b/cmd/kvisor/imgcollector/collector/collector.go @@ -13,6 +13,7 @@ import ( "time" fanalyzer "github.com/aquasecurity/trivy/pkg/fanal/analyzer" + "github.com/castai/kvisor/cmd/kvisor/imgcollector/config" "github.com/cenkalti/backoff/v4" "github.com/google/go-containerregistry/pkg/name" "github.com/samber/lo" @@ -23,7 +24,6 @@ import ( "github.com/castai/image-analyzer/image" "github.com/castai/image-analyzer/image/hostfs" "github.com/castai/kvisor/castai" - "github.com/castai/kvisor/cmd/imgcollector/config" ) func New(log logrus.FieldLogger, cfg config.Config, cache analyzer.CacheClient, hostfsConfig *hostfs.ContainerdHostFSConfig) *Collector { diff --git a/cmd/imgcollector/collector/collector_test.go b/cmd/kvisor/imgcollector/collector/collector_test.go similarity index 99% rename from cmd/imgcollector/collector/collector_test.go rename to cmd/kvisor/imgcollector/collector/collector_test.go index b7e28f68..b4401293 100644 --- a/cmd/imgcollector/collector/collector_test.go +++ b/cmd/kvisor/imgcollector/collector/collector_test.go @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/castai/kvisor/cmd/kvisor/imgcollector/config" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/sirupsen/logrus" @@ -23,7 +24,6 @@ import ( "github.com/castai/image-analyzer/image/hostfs" mock_blobcache "github.com/castai/kvisor/blobscache/mock" "github.com/castai/kvisor/castai" - "github.com/castai/kvisor/cmd/imgcollector/config" ) func TestCollector(t *testing.T) { diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/019d8da33d911d9baabe58ad63dea2107ed15115cca0fc27fc0f627e82a695c1 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/019d8da33d911d9baabe58ad63dea2107ed15115cca0fc27fc0f627e82a695c1 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/019d8da33d911d9baabe58ad63dea2107ed15115cca0fc27fc0f627e82a695c1 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/019d8da33d911d9baabe58ad63dea2107ed15115cca0fc27fc0f627e82a695c1 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/0c7cbd62be47ac473553003a18badf4bf06e172969cad1a4e01bf24b7ce8a875 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/0c7cbd62be47ac473553003a18badf4bf06e172969cad1a4e01bf24b7ce8a875 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/0c7cbd62be47ac473553003a18badf4bf06e172969cad1a4e01bf24b7ce8a875 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/0c7cbd62be47ac473553003a18badf4bf06e172969cad1a4e01bf24b7ce8a875 diff --git 
a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/1ff6c18fbef2045af6b9c16bf034cc421a29027b800e4f9b68ae9b1cb3e9ae07 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/1ff6c18fbef2045af6b9c16bf034cc421a29027b800e4f9b68ae9b1cb3e9ae07 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/1ff6c18fbef2045af6b9c16bf034cc421a29027b800e4f9b68ae9b1cb3e9ae07 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/1ff6c18fbef2045af6b9c16bf034cc421a29027b800e4f9b68ae9b1cb3e9ae07 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/225ffb162a74d930807d6f49397505fbe0cd2c0ef772a9e9360de02ec170084a b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/225ffb162a74d930807d6f49397505fbe0cd2c0ef772a9e9360de02ec170084a similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/225ffb162a74d930807d6f49397505fbe0cd2c0ef772a9e9360de02ec170084a rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/225ffb162a74d930807d6f49397505fbe0cd2c0ef772a9e9360de02ec170084a diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/251aef3431474c06ff4ff23451f222028f15d5bb26c76e01b93df2805c074546 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/251aef3431474c06ff4ff23451f222028f15d5bb26c76e01b93df2805c074546 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/251aef3431474c06ff4ff23451f222028f15d5bb26c76e01b93df2805c074546 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/251aef3431474c06ff4ff23451f222028f15d5bb26c76e01b93df2805c074546 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/2ed607434f38fb0ed185967b76229ebadb7050355712716e37a486d88f37d224 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/2ed607434f38fb0ed185967b76229ebadb7050355712716e37a486d88f37d224 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/2ed607434f38fb0ed185967b76229ebadb7050355712716e37a486d88f37d224 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/2ed607434f38fb0ed185967b76229ebadb7050355712716e37a486d88f37d224 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/369201a612f7b2b585a8e6ca99f77a36bcdbd032463d815388a96800b63ef2c8 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/369201a612f7b2b585a8e6ca99f77a36bcdbd032463d815388a96800b63ef2c8 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/369201a612f7b2b585a8e6ca99f77a36bcdbd032463d815388a96800b63ef2c8 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/369201a612f7b2b585a8e6ca99f77a36bcdbd032463d815388a96800b63ef2c8 diff --git 
a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/3ce5844f62a983e5b10859862cfb1beeb6e0727e3ebb43acb47b2d02cb8c35b2 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/3ce5844f62a983e5b10859862cfb1beeb6e0727e3ebb43acb47b2d02cb8c35b2 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/3ce5844f62a983e5b10859862cfb1beeb6e0727e3ebb43acb47b2d02cb8c35b2 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/3ce5844f62a983e5b10859862cfb1beeb6e0727e3ebb43acb47b2d02cb8c35b2 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4483262f6a6f5772af1dec03774435a0ed263f40e6e835b8b7f4a21aa86484a8 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4483262f6a6f5772af1dec03774435a0ed263f40e6e835b8b7f4a21aa86484a8 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4483262f6a6f5772af1dec03774435a0ed263f40e6e835b8b7f4a21aa86484a8 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4483262f6a6f5772af1dec03774435a0ed263f40e6e835b8b7f4a21aa86484a8 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/45d672e69625ba11ed0c133912a3a66b06364a59552e144f735b9bd14c8dbe76 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/45d672e69625ba11ed0c133912a3a66b06364a59552e144f735b9bd14c8dbe76 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/45d672e69625ba11ed0c133912a3a66b06364a59552e144f735b9bd14c8dbe76 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/45d672e69625ba11ed0c133912a3a66b06364a59552e144f735b9bd14c8dbe76 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4c62f76f6c4b46a4b4b0156363b24c087b97e977eb263cc72db0546878ace0dd b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4c62f76f6c4b46a4b4b0156363b24c087b97e977eb263cc72db0546878ace0dd similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4c62f76f6c4b46a4b4b0156363b24c087b97e977eb263cc72db0546878ace0dd rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4c62f76f6c4b46a4b4b0156363b24c087b97e977eb263cc72db0546878ace0dd diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4ddae8870d41cb97c754c963b45d278ab010d542bda94b678ea94f76238007d6 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4ddae8870d41cb97c754c963b45d278ab010d542bda94b678ea94f76238007d6 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4ddae8870d41cb97c754c963b45d278ab010d542bda94b678ea94f76238007d6 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/4ddae8870d41cb97c754c963b45d278ab010d542bda94b678ea94f76238007d6 diff --git 
a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/6e9140ff5cb19f7993d70a63f1c240fa95ad2320025606f2f08c070254da6a46 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/6e9140ff5cb19f7993d70a63f1c240fa95ad2320025606f2f08c070254da6a46 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/6e9140ff5cb19f7993d70a63f1c240fa95ad2320025606f2f08c070254da6a46 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/6e9140ff5cb19f7993d70a63f1c240fa95ad2320025606f2f08c070254da6a46 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/b0d9c54760b35edd1854e5710c1a62a28ad2d2b070c801da3e30a3e59c19e7e3 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/b0d9c54760b35edd1854e5710c1a62a28ad2d2b070c801da3e30a3e59c19e7e3 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/b0d9c54760b35edd1854e5710c1a62a28ad2d2b070c801da3e30a3e59c19e7e3 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/b0d9c54760b35edd1854e5710c1a62a28ad2d2b070c801da3e30a3e59c19e7e3 diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/ccb4851811bec83e2565b9a67b2e6f8924001093a17d152b1a4e2c25b2ac48ce b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/ccb4851811bec83e2565b9a67b2e6f8924001093a17d152b1a4e2c25b2ac48ce similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/ccb4851811bec83e2565b9a67b2e6f8924001093a17d152b1a4e2c25b2ac48ce rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/ccb4851811bec83e2565b9a67b2e6f8924001093a17d152b1a4e2c25b2ac48ce diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/e4d5dd6e67b14bf5bfc57a4ed6126d2ff9618ae17a809ba0eed98b68d62c200d b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/e4d5dd6e67b14bf5bfc57a4ed6126d2ff9618ae17a809ba0eed98b68d62c200d similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/e4d5dd6e67b14bf5bfc57a4ed6126d2ff9618ae17a809ba0eed98b68d62c200d rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/e4d5dd6e67b14bf5bfc57a4ed6126d2ff9618ae17a809ba0eed98b68d62c200d diff --git a/cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/ed210e3e4a5bae1237f1bb44d72a05a2f1e5c6bfe7a7e73da179e2534269c459 b/cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/ed210e3e4a5bae1237f1bb44d72a05a2f1e5c6bfe7a7e73da179e2534269c459 similarity index 100% rename from cmd/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/ed210e3e4a5bae1237f1bb44d72a05a2f1e5c6bfe7a7e73da179e2534269c459 rename to cmd/kvisor/imgcollector/collector/testdata/amd64-linux/io.containerd.content.v1.content/blobs/sha256/ed210e3e4a5bae1237f1bb44d72a05a2f1e5c6bfe7a7e73da179e2534269c459 diff --git 
diff --git a/cmd/imgcollector/collector/testdata/expected_image_scan_meta1.json b/cmd/kvisor/imgcollector/collector/testdata/expected_image_scan_meta1.json
similarity index 100%
rename from cmd/imgcollector/collector/testdata/expected_image_scan_meta1.json
rename to cmd/kvisor/imgcollector/collector/testdata/expected_image_scan_meta1.json
diff --git a/cmd/imgcollector/config/config.go b/cmd/kvisor/imgcollector/config/config.go
similarity index 100%
rename from cmd/imgcollector/config/config.go
rename to cmd/kvisor/imgcollector/config/config.go
diff --git a/cmd/imgcollector/config/config_test.go b/cmd/kvisor/imgcollector/config/config_test.go
similarity index 100%
rename from cmd/imgcollector/config/config_test.go
rename to cmd/kvisor/imgcollector/config/config_test.go
diff --git a/cmd/imgcollector/main.go b/cmd/kvisor/imgcollector/imgcollector.go
similarity index 72%
rename from cmd/imgcollector/main.go
rename to cmd/kvisor/imgcollector/imgcollector.go
index 453dcb6e..60e52dcc 100644
--- a/cmd/imgcollector/main.go
+++ b/cmd/kvisor/imgcollector/imgcollector.go
@@ -1,4 +1,4 @@
-package main
+package imgcollector

 import (
     "context"
@@ -7,27 +7,31 @@ import (
     "runtime"

     "github.com/castai/image-analyzer/image/hostfs"
+    "github.com/castai/kvisor/cmd/kvisor/imgcollector/collector"
+    "github.com/castai/kvisor/cmd/kvisor/imgcollector/config"
     v1 "github.com/google/go-containerregistry/pkg/v1"
+    "github.com/spf13/cobra"
     "github.com/sirupsen/logrus"

     "github.com/castai/kvisor/blobscache"
-    "github.com/castai/kvisor/cmd/imgcollector/collector"
-    "github.com/castai/kvisor/cmd/imgcollector/config"
 )

-// These should be set via `go build` during a release.
-var (
-    GitCommit = "undefined"
-    GitRef    = "no-ref"
-    Version   = "local"
-)
+func NewCommand(version, gitCommit string) *cobra.Command {
+    return &cobra.Command{
+        Use:   "analyze-image",
+        Short: "Run kvisor image metadata collection",
+        Run: func(cmd *cobra.Command, args []string) {
+            run(cmd.Context(), version, gitCommit)
+        },
+    }
+}

-func main() {
+func run(ctx context.Context, version string, commit string) {
     logger := logrus.New()
     logger.SetLevel(logrus.DebugLevel)
     log := logger.WithField("component", "imagescan_job")
-    log.Infof("running image scan job, version=%s, commit=%s", Version, GitCommit)
+    log.Infof("running image scan job, version=%s, commit=%s", version, commit)

     cfg, err := config.FromEnv()
     if err != nil {
@@ -48,7 +52,7 @@ func main() {
     }

     c := collector.New(log, cfg, blobsCache, h)
-    ctx, cancel := context.WithTimeout(context.Background(), cfg.Timeout)
+    ctx, cancel := context.WithTimeout(ctx, cfg.Timeout)
     defer cancel()

     if cfg.PprofAddr != "" {
diff --git a/cmd/imgcollector/stub/licensing/go.mod b/cmd/kvisor/imgcollector/stub/licensing/go.mod
similarity index 100%
rename from cmd/imgcollector/stub/licensing/go.mod
rename to cmd/kvisor/imgcollector/stub/licensing/go.mod
diff --git a/cmd/imgcollector/stub/licensing/licensedb/fake.go b/cmd/kvisor/imgcollector/stub/licensing/licensedb/fake.go
similarity index 100%
rename from cmd/imgcollector/stub/licensing/licensedb/fake.go
rename to cmd/kvisor/imgcollector/stub/licensing/licensedb/fake.go
diff --git a/cmd/kvisor/kubebench/check/check.go b/cmd/kvisor/kubebench/check/check.go
new file mode 100644
index 00000000..58ce6fb7
--- /dev/null
+++ b/cmd/kvisor/kubebench/check/check.go
@@ -0,0 +1,313 @@
+// Copyright © 2017 Aqua Security Software Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package check
+
+import (
+    "bytes"
+    "fmt"
+    "os/exec"
+    "strings"
+
+    "github.com/golang/glog"
+)
+
+// NodeType indicates the type of node (master, node).
+type NodeType string
+
+// State is the state of a control check.
+type State string
+
+const (
+    // PASS check passed.
+    PASS State = "PASS"
+    // FAIL check failed.
+    FAIL State = "FAIL"
+    // WARN could not carry out check.
+    WARN State = "WARN"
+    // INFO informational message
+    INFO State = "INFO"
+
+    // SKIP for when a check should be skipped.
+    SKIP = "skip"
+
+    // MASTER a master node
+    MASTER NodeType = "master"
+    // NODE a node
+    NODE NodeType = "node"
+    // FEDERATED a federated deployment.
+    FEDERATED NodeType = "federated"
+
+    // ETCD an etcd node
+    ETCD NodeType = "etcd"
+    // CONTROLPLANE a control plane node
+    CONTROLPLANE NodeType = "controlplane"
+    // POLICIES a node to run policies from
+    POLICIES NodeType = "policies"
+    // MANAGEDSERVICES a node to run managedservices from
+    MANAGEDSERVICES = "managedservices"
+
+    // MANUAL Check Type
+    MANUAL string = "manual"
+)
+
+// Check contains information about a recommendation in the
+// CIS Kubernetes document.
+type Check struct {
+    ID                string   `yaml:"id" json:"test_number"`
+    Text              string   `json:"test_desc"`
+    Audit             string   `json:"audit"`
+    AuditEnv          string   `yaml:"audit_env"`
+    AuditConfig       string   `yaml:"audit_config"`
+    Type              string   `json:"type"`
+    Tests             *tests   `json:"-"`
+    Set               bool     `json:"-"`
+    Remediation       string   `json:"remediation"`
+    TestInfo          []string `json:"test_info"`
+    State             `json:"status"`
+    ActualValue       string `json:"actual_value"`
+    Scored            bool   `json:"scored"`
+    IsMultiple        bool   `yaml:"use_multiple_values"`
+    ExpectedResult    string `json:"expected_result"`
+    Reason            string `json:"reason,omitempty"`
+    AuditOutput       string `json:"-"`
+    AuditEnvOutput    string `json:"-"`
+    AuditConfigOutput string `json:"-"`
+    DisableEnvTesting bool   `json:"-"`
+}
+
+// Runner wraps the basic Run method.
+type Runner interface {
+    // Run runs a given check and returns the execution state.
+    Run(c *Check) State
+}
+
+// NewRunner constructs a default Runner.
+func NewRunner() Runner {
+    return &defaultRunner{}
+}
+
+type defaultRunner struct{}
+
+func (r *defaultRunner) Run(c *Check) State {
+    return c.run()
+}
+
+// Run executes the audit commands specified in a check and outputs
+// the results.
+func (c *Check) run() State {
+    glog.V(3).Infof("----- Running check %v -----", c.ID)
+    // Since this is a Scored check without tests,
+    // return a 'WARN' to alert the user that this
+    // check needs attention.
+    if c.Scored && strings.TrimSpace(c.Type) == "" && c.Tests == nil {
+        c.Reason = "There are no tests"
+        c.State = WARN
+        glog.V(3).Info(c.Reason)
+        return c.State
+    }
+
+    // If check type is skip, force result to INFO
+    if c.Type == SKIP {
+        c.Reason = "Test marked as skip"
+        c.State = INFO
+        glog.V(3).Info(c.Reason)
+        return c.State
+    }
+
+    // If check type is manual, force result to WARN
+    if c.Type == MANUAL {
+        c.Reason = "Test marked as a manual test"
+        c.State = WARN
+        glog.V(3).Info(c.Reason)
+        return c.State
+    }
+
+    // If there aren't any tests defined this is a FAIL or WARN
+    if c.Tests == nil || len(c.Tests.TestItems) == 0 {
+        c.Reason = "No tests defined"
+        if c.Scored {
+            c.State = FAIL
+        } else {
+            c.State = WARN
+        }
+        glog.V(3).Info(c.Reason)
+        return c.State
+    }
+
+    // Command line parameters override the setting in the config file, so if we get a good result from the Audit command that's all we need to run.
+    var finalOutput *testOutput
+    var lastCommand string
+
+    lastCommand, err := c.runAuditCommands()
+    if err == nil {
+        finalOutput, err = c.execute()
+    }
+
+    if finalOutput != nil {
+        if finalOutput.testResult {
+            c.State = PASS
+        } else {
+            if c.Scored {
+                c.State = FAIL
+            } else {
+                c.State = WARN
+            }
+        }
+
+        c.ActualValue = finalOutput.actualResult
+        c.ExpectedResult = finalOutput.ExpectedResult
+    }
+
+    if err != nil {
+        c.Reason = err.Error()
+        if c.Scored {
+            c.State = FAIL
+        } else {
+            c.State = WARN
+        }
+        glog.V(3).Info(c.Reason)
+    }
+
+    if finalOutput != nil {
+        glog.V(3).Infof("Command: %q TestResult: %t State: %q \n", lastCommand, finalOutput.testResult, c.State)
+    } else {
+        glog.V(3).Infof("Command: %q TestResult: <> \n", lastCommand)
+    }
+
+    if c.Reason != "" {
+        glog.V(2).Info(c.Reason)
+    }
+    return c.State
+}
+
+func (c *Check) runAuditCommands() (lastCommand string, err error) {
+    // Always run auditEnvOutput if needed
+    if c.AuditEnv != "" {
+        c.AuditEnvOutput, err = runAudit(c.AuditEnv)
+        if err != nil {
+            return c.AuditEnv, err
+        }
+    }
+
+    // Run the audit command and auditConfig commands, if present
+    c.AuditOutput, err = runAudit(c.Audit)
+    if err != nil {
+        return c.Audit, err
+    }
+
+    c.AuditConfigOutput, err = runAudit(c.AuditConfig)
+    // When a file is not found, the error comes as exit status 127;
+    // in some environments the same error comes as exit status 1.
+    if err != nil && (strings.Contains(err.Error(), "exit status 127") ||
+        strings.Contains(err.Error(), "No such file or directory")) &&
+        (c.AuditEnvOutput != "" || c.AuditOutput != "") {
+        // suppress the file-not-found error when there is Audit OR AuditEnv output present
+        glog.V(3).Info(err)
+        err = nil
+        c.AuditConfigOutput = ""
+    }
+    return c.AuditConfig, err
+}
+
+func (c *Check) execute() (finalOutput *testOutput, err error) {
+    finalOutput = &testOutput{}
+
+    ts := c.Tests
+    res := make([]testOutput, len(ts.TestItems))
+    expectedResultArr := make([]string, len(res))
+
+    glog.V(3).Infof("Running %d test_items", len(ts.TestItems))
+    for i, t := range ts.TestItems {
+
+        t.isMultipleOutput = c.IsMultiple
+
+        // Try with the auditOutput first, and if that's not found, try the auditConfigOutput
+        t.auditUsed = AuditCommand
+        result := *(t.execute(c.AuditOutput))
+
+        // Check for AuditConfigOutput only if AuditConfig is set and auditConfigOutput is not empty
+        if !result.flagFound && c.AuditConfig != "" && c.AuditConfigOutput != "" {
+            // t.isConfigSetting = true
+            t.auditUsed = AuditConfig
+            result = *(t.execute(c.AuditConfigOutput))
+            if !result.flagFound && t.Env != "" {
+                t.auditUsed = AuditEnv
+                result = *(t.execute(c.AuditEnvOutput))
+            }
+        }
+
+        if !result.flagFound && t.Env != "" {
+            t.auditUsed = AuditEnv
+            result = *(t.execute(c.AuditEnvOutput))
+        }
+        glog.V(2).Infof("Used %s", t.auditUsed)
+        res[i] = result
+        expectedResultArr[i] = res[i].ExpectedResult
+    }
+
+    var result bool
+    // If no binary operation is specified, default to AND
+    switch ts.BinOp {
+    default:
+        glog.V(2).Info(fmt.Sprintf("unknown binary operator for tests %s\n", ts.BinOp))
+        finalOutput.actualResult = fmt.Sprintf("unknown binary operator for tests %s\n", ts.BinOp)
+        return finalOutput, fmt.Errorf("unknown binary operator for tests %s", ts.BinOp)
+    case and, "":
+        result = true
+        for i := range res {
+            result = result && res[i].testResult
+        }
+        // Generate an AND expected result
+        finalOutput.ExpectedResult = strings.Join(expectedResultArr, " AND ")
+
+    case or:
+        result = false
+        for i := range res {
+            result = result || res[i].testResult
+        }
+        // Generate an OR expected result
+        finalOutput.ExpectedResult = strings.Join(expectedResultArr, " OR ")
+    }
+
+    finalOutput.testResult = result
+    finalOutput.actualResult = res[0].actualResult
+
+    glog.V(3).Infof("Returning from execute on tests: finalOutput %#v", finalOutput)
+    return finalOutput, nil
+}
+
+func runAudit(audit string) (output string, err error) {
+    var out bytes.Buffer
+
+    audit = strings.TrimSpace(audit)
+    if len(audit) == 0 {
+        return output, err
+    }
+
+    cmd := exec.Command("/bin/sh")
+    cmd.Stdin = strings.NewReader(audit)
+    cmd.Stdout = &out
+    cmd.Stderr = &out
+    err = cmd.Run()
+    output = out.String()
+
+    if err != nil {
+        err = fmt.Errorf("failed to run: %q, output: %q, error: %s", audit, output, err)
+    } else {
+        glog.V(3).Infof("Command: %q", audit)
+        glog.V(3).Infof("Output:\n %q", output)
+    }
+    return output, err
+}
diff --git a/cmd/kvisor/kubebench/check/check_test.go b/cmd/kvisor/kubebench/check/check_test.go
new file mode 100644
index 00000000..124e6f93
--- /dev/null
+++ b/cmd/kvisor/kubebench/check/check_test.go
@@ -0,0 +1,245 @@
+// Copyright © 2017-2020 Aqua Security Software Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package check
+
+import (
+    "strings"
+    "testing"
+)
+
+func TestCheck_Run(t *testing.T) {
+    type TestCase struct {
+        name     string
+        check    Check
+        Expected State
+    }
+
+    testCases := []TestCase{
+        {name: "Manual check should WARN", check: Check{Type: MANUAL}, Expected: WARN},
+        {name: "Skip check should INFO", check: Check{Type: "skip"}, Expected: INFO},
+        {name: "Unscored check (with no type) should WARN on failure", check: Check{Scored: false}, Expected: WARN},
+        {
+            name: "Unscored check that pass should PASS",
+            check: Check{
+                Scored: false,
+                Audit:  "echo hello",
+                Tests: &tests{TestItems: []*testItem{{
+                    Flag: "hello",
+                    Set:  true,
+                }}},
+            },
+            Expected: PASS,
+        },
+
+        {name: "Check with no tests should WARN", check: Check{Scored: true}, Expected: WARN},
+        {name: "Scored check with empty tests should FAIL", check: Check{Scored: true, Tests: &tests{}}, Expected: FAIL},
+        {
+            name: "Scored check that doesn't pass should FAIL",
+            check: Check{
+                Scored: true,
+                Audit:  "echo hello",
+                Tests: &tests{TestItems: []*testItem{{
+                    Flag: "hello",
+                    Set:  false,
+                }}},
+            },
+            Expected: FAIL,
+        },
+        {
+            name: "Scored checks that pass should PASS",
+            check: Check{
+                Scored: true,
+                Audit:  "echo hello",
+                Tests: &tests{TestItems: []*testItem{{
+                    Flag: "hello",
+                    Set:  true,
+                }}},
+            },
+            Expected: PASS,
+        },
+        {
+            name: "Scored checks that pass should PASS when config file is not present",
+            check: Check{
+                Scored:      true,
+                Audit:       "echo hello",
+                AuditConfig: "/test/config.yaml",
+                Tests: &tests{TestItems: []*testItem{{
+                    Flag: "hello",
+                    Set:  true,
+                }}},
+            },
+            Expected: PASS,
+        },
+        {
+            name: "Scored checks that pass should FAIL when config file is not present",
+            check: Check{
+                Scored:      true,
+                AuditConfig: "/test/config.yaml",
+                Tests: &tests{TestItems: []*testItem{{
+                    Flag: "hello",
+                    Set:  true,
+                }}},
+            },
+            Expected: FAIL,
+        },
+    }
+
+    for _, testCase := range testCases {
+        t.Run(testCase.name, func(t *testing.T) {
+            testCase.check.run()
+            if testCase.check.State != testCase.Expected {
+                t.Errorf("expected %s, actual %s", testCase.Expected, testCase.check.State)
+            }
+        })
+    }
+}
+
+func TestCheckAuditEnv(t *testing.T) {
+    passingCases := []*Check{
+        controls.Groups[2].Checks[0],
+        controls.Groups[2].Checks[2],
+        controls.Groups[2].Checks[3],
+        controls.Groups[2].Checks[4],
+    }
+
+    failingCases := []*Check{
+        controls.Groups[2].Checks[1],
+        controls.Groups[2].Checks[5],
+        controls.Groups[2].Checks[6],
+    }
+
+    for _, c := range passingCases {
+        t.Run(c.Text, func(t *testing.T) {
+            c.run()
+            if c.State != "PASS" {
+                t.Errorf("Should PASS, got: %v", c.State)
+            }
+        })
+    }
+
+    for _, c := range failingCases {
+        t.Run(c.Text, func(t *testing.T) {
+            c.run()
+            if c.State != "FAIL" {
+                t.Errorf("Should FAIL, got: %v", c.State)
+            }
+        })
+    }
+}
+
+func TestCheckAuditConfig(t *testing.T) {
+    passingCases := []*Check{
+        controls.Groups[1].Checks[0],
+        controls.Groups[1].Checks[3],
+        controls.Groups[1].Checks[5],
+        controls.Groups[1].Checks[7],
+        controls.Groups[1].Checks[9],
+        controls.Groups[1].Checks[15],
+    }
+
+    failingCases := []*Check{
+        controls.Groups[1].Checks[1],
+        controls.Groups[1].Checks[2],
+        controls.Groups[1].Checks[4],
+        controls.Groups[1].Checks[6],
+        controls.Groups[1].Checks[8],
+        controls.Groups[1].Checks[10],
+        controls.Groups[1].Checks[11],
+        controls.Groups[1].Checks[12],
+        controls.Groups[1].Checks[13],
+        controls.Groups[1].Checks[14],
+        controls.Groups[1].Checks[16],
+    }
+
+    for _, c := range passingCases {
+        t.Run(c.Text, func(t *testing.T) {
+            c.run()
+            if c.State != "PASS" {
+                t.Errorf("Should PASS, got: %v", c.State)
+            }
+        })
+    }
+
+    for _, c := range failingCases {
+        t.Run(c.Text, func(t *testing.T) {
+            c.run()
+            if c.State != "FAIL" {
+                t.Errorf("Should FAIL, got: %v", c.State)
+            }
+        })
+    }
+}
+
+func Test_runAudit(t *testing.T) {
+    type args struct {
+        audit  string
+        output string
+    }
+    tests := []struct {
+        name   string
+        args   args
+        errMsg string
+        output string
+    }{
+        {
+            name: "run success",
+            args: args{
+                audit: "echo 'hello world'",
+            },
+            errMsg: "",
+            output: "hello world\n",
+        },
+        {
+            name: "run multiple lines script",
+            args: args{
+                audit: `
+hello() {
+  echo "hello world"
+}
+
+hello
+`,
+            },
+            errMsg: "",
+            output: "hello world\n",
+        },
+        {
+            name: "run failed",
+            args: args{
+                audit: "unknown_command",
+            },
+            errMsg: "failed to run: \"unknown_command\", output: \"/bin/sh: ",
+            output: "not found\n",
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            var errMsg string
+            output, err := runAudit(tt.args.audit)
+            if err != nil {
+                errMsg = err.Error()
+            }
+            if errMsg != "" && !strings.Contains(errMsg, tt.errMsg) {
+                t.Errorf("name %s errMsg = %q, want %q", tt.name, errMsg, tt.errMsg)
+            }
+            if errMsg == "" && output != tt.output {
+                t.Errorf("name %s output = %q, want %q", tt.name, output, tt.output)
+            }
+            if errMsg != "" && !strings.Contains(output, tt.output) {
+                t.Errorf("name %s output = %q, want %q", tt.name, output, tt.output)
+            }
+        })
+    }
+}
diff --git a/cmd/kvisor/kubebench/check/controls.go b/cmd/kvisor/kubebench/check/controls.go
new file mode 100644
index 00000000..309800b4
--- /dev/null
+++ b/cmd/kvisor/kubebench/check/controls.go
@@ -0,0 +1,232 @@
+// Copyright © 2017 Aqua Security Software Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package check
+
+import (
+    "bytes"
+    "encoding/json"
+    "encoding/xml"
+    "fmt"
+
+    "github.com/golang/glog"
+    "github.com/onsi/ginkgo/reporters"
+    "gopkg.in/yaml.v2"
+)
+
+const (
+    // UNKNOWN is when the AWS account can't be found
+    UNKNOWN = "Unknown"
+    // ARN for the AWS Security Hub service
+    ARN = "arn:aws:securityhub:%s::product/aqua-security/kube-bench"
+    // SCHEMA for the AWS Security Hub service
+    SCHEMA = "2018-10-08"
+    // TYPE is type of Security Hub finding
+    TYPE = "Software and Configuration Checks/Industry and Regulatory Standards/CIS Kubernetes Benchmark"
+)
+
+type OverallControls struct {
+    Controls []*Controls
+    Totals   Summary
+}
+
+// Controls holds all controls to check for master nodes.
+type Controls struct {
+    ID              string   `yaml:"id" json:"id"`
+    Version         string   `json:"version"`
+    DetectedVersion string   `json:"detected_version,omitempty"`
+    Text            string   `json:"text"`
+    Type            NodeType `json:"node_type"`
+    Groups          []*Group `json:"tests"`
+    Summary
+}
+
+// Group is a collection of similar checks.
+type Group struct {
+    ID     string   `yaml:"id" json:"section"`
+    Type   string   `yaml:"type" json:"type"`
+    Pass   int      `json:"pass"`
+    Fail   int      `json:"fail"`
+    Warn   int      `json:"warn"`
+    Info   int      `json:"info"`
+    Text   string   `json:"desc"`
+    Checks []*Check `json:"results"`
+}
+
+// Summary is a summary of the results of control checks run.
+type Summary struct {
+    Pass int `json:"total_pass"`
+    Fail int `json:"total_fail"`
+    Warn int `json:"total_warn"`
+    Info int `json:"total_info"`
+}
+
+// Predicate a predicate on the given Group and Check arguments.
+type Predicate func(group *Group, check *Check) bool
+
+// NewControls instantiates a new master Controls object.
+func NewControls(t NodeType, in []byte, detectedVersion string) (*Controls, error) {
+    c := new(Controls)
+
+    err := yaml.Unmarshal(in, c)
+    if err != nil {
+        return nil, fmt.Errorf("failed to unmarshal YAML: %s", err)
+    }
+
+    if t != c.Type {
+        return nil, fmt.Errorf("non-%s controls file specified", t)
+    }
+    c.DetectedVersion = detectedVersion
+    return c, nil
+}
+
+// RunChecks runs the checks with the given Runner. Only checks for which the filter Predicate returns `true` will run.
+func (controls *Controls) RunChecks(runner Runner, filter Predicate, skipIDMap map[string]bool) Summary {
+    var g []*Group
+    m := make(map[string]*Group)
+    controls.Summary.Pass, controls.Summary.Fail, controls.Summary.Warn, controls.Info = 0, 0, 0, 0
+
+    for _, group := range controls.Groups {
+        for _, check := range group.Checks {
+
+            if !filter(group, check) {
+                continue
+            }
+
+            _, groupSkippedViaCmd := skipIDMap[group.ID]
+            _, checkSkippedViaCmd := skipIDMap[check.ID]
+
+            if group.Type == SKIP || groupSkippedViaCmd || checkSkippedViaCmd {
+                check.Type = SKIP
+            }
+
+            state := runner.Run(check)
+
+            check.TestInfo = append(check.TestInfo, check.Remediation)
+
+            // Check if we have already added this check's group.
+            if v, ok := m[group.ID]; !ok {
+                // Create a group with the same info
+                w := &Group{
+                    ID:     group.ID,
+                    Text:   group.Text,
+                    Checks: []*Check{},
+                }
+
+                // Add this check to the new group
+                w.Checks = append(w.Checks, check)
+                summarizeGroup(w, state)
+
+                // Add to groups we have visited.
+                m[w.ID] = w
+                g = append(g, w)
+            } else {
+                v.Checks = append(v.Checks, check)
+                summarizeGroup(v, state)
+            }
+
+            summarize(controls, state)
+        }
+    }
+
+    controls.Groups = g
+    return controls.Summary
+}
+
+// JSON encodes the results of last run to JSON.
+func (controls *Controls) JSON() ([]byte, error) {
+    return json.Marshal(controls)
+}
+
+// JUnit encodes the results of last run to JUnit.
+func (controls *Controls) JUnit() ([]byte, error) {
+    suite := reporters.JUnitTestSuite{
+        Name:      controls.Text,
+        TestCases: []reporters.JUnitTestCase{},
+        Tests:     controls.Summary.Pass + controls.Summary.Fail + controls.Summary.Info + controls.Summary.Warn,
+        Failures:  controls.Summary.Fail,
+    }
+    for _, g := range controls.Groups {
+        for _, check := range g.Checks {
+            jsonCheck := ""
+            jsonBytes, err := json.Marshal(check)
+            if err != nil {
+                jsonCheck = fmt.Sprintf("Failed to marshal test into JSON: %v. Test as text: %#v", err, check)
+            } else {
+                jsonCheck = string(jsonBytes)
+            }
+            tc := reporters.JUnitTestCase{
+                Name:      fmt.Sprintf("%v %v", check.ID, check.Text),
+                ClassName: g.Text,
+
+                // Store the entire json serialization as system out so we don't lose data in cases where deeper debugging is necessary.
+                SystemOut: jsonCheck,
+            }
+
+            switch check.State {
+            case FAIL:
+                tc.FailureMessage = &reporters.JUnitFailureMessage{Message: check.Remediation}
+            case WARN, INFO:
+                // WARN and INFO are two different versions of skipped tests. Either way it would be a false positive/negative to report
+                // it any other way.
+                tc.Skipped = &reporters.JUnitSkipped{}
+            case PASS:
+            default:
+                glog.Warningf("Unrecognized state %s", check.State)
+            }
+
+            suite.TestCases = append(suite.TestCases, tc)
+        }
+    }
+
+    var b bytes.Buffer
+    encoder := xml.NewEncoder(&b)
+    encoder.Indent("", "  ")
+    err := encoder.Encode(suite)
+    if err != nil {
+        return nil, fmt.Errorf("Failed to generate JUnit report: %s", err.Error())
+    }
+
+    return b.Bytes(), nil
+}
+
+func summarize(controls *Controls, state State) {
+    switch state {
+    case PASS:
+        controls.Summary.Pass++
+    case FAIL:
+        controls.Summary.Fail++
+    case WARN:
+        controls.Summary.Warn++
+    case INFO:
+        controls.Summary.Info++
+    default:
+        glog.Warningf("Unrecognized state %s", state)
+    }
+}
+
+func summarizeGroup(group *Group, state State) {
+    switch state {
+    case PASS:
+        group.Pass++
+    case FAIL:
+        group.Fail++
+    case WARN:
+        group.Warn++
+    case INFO:
+        group.Info++
+    default:
+        glog.Warningf("Unrecognized state %s", state)
+    }
+}
diff --git a/cmd/kvisor/kubebench/check/controls_test.go b/cmd/kvisor/kubebench/check/controls_test.go
new file mode 100644
index 00000000..7d59763b
--- /dev/null
+++ b/cmd/kvisor/kubebench/check/controls_test.go
@@ -0,0 +1,359 @@
+// Copyright © 2017-2019 Aqua Security Software Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package check + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/onsi/ginkgo/reporters" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "gopkg.in/yaml.v2" +) + +const cfgDir = "../kubebench-rules/" + +type mockRunner struct { + mock.Mock +} + +func (m *mockRunner) Run(c *Check) State { + args := m.Called(c) + return args.Get(0).(State) +} + +// validate that the files we're shipping are valid YAML +func TestYamlFiles(t *testing.T) { + err := filepath.Walk(cfgDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Fatalf("failure accessing path %q: %v\n", path, err) + } + if !info.IsDir() { + t.Logf("reading file: %s", path) + in, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("error opening file %s: %v", path, err) + } + + c := new(Controls) + err = yaml.Unmarshal(in, c) + if err == nil { + t.Logf("YAML file successfully unmarshalled: %s", path) + } else { + t.Fatalf("failed to load YAML from %s: %v", path, err) + } + } + return nil + }) + if err != nil { + t.Fatalf("failure walking cfg dir: %v\n", err) + } +} + +func TestNewControls(t *testing.T) { + t.Run("Should return error when node type is not specified", func(t *testing.T) { + // given + in := []byte(` +--- +controls: +type: # not specified +groups: +`) + // when + _, err := NewControls(MASTER, in, "") + // then + assert.EqualError(t, err, "non-master controls file specified") + }) + + t.Run("Should return error when input YAML is invalid", func(t *testing.T) { + // given + in := []byte("BOOM") + // when + _, err := NewControls(MASTER, in, "") + // then + assert.EqualError(t, err, "failed to unmarshal YAML: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `BOOM` into check.Controls") + }) + +} + +func TestControls_RunChecks_SkippedCmd(t *testing.T) { + t.Run("Should skip checks and groups specified by skipMap", func(t *testing.T) { + // given + normalRunner := &defaultRunner{} + // and + in := []byte(` +--- +type: "master" +groups: +- id: G1 + checks: + - id: G1/C1 + - id: G1/C2 + - id: G1/C3 +- id: G2 + checks: + - id: G2/C1 + - id: G2/C2 +`) + controls, err := NewControls(MASTER, in, "") + assert.NoError(t, err) + + var allChecks Predicate = func(group *Group, c *Check) bool { + return true + } + + skipMap := make(map[string]bool, 0) + skipMap["G1"] = true + skipMap["G2/C1"] = true + skipMap["G2/C2"] = true + controls.RunChecks(normalRunner, allChecks, skipMap) + + G1 := controls.Groups[0] + assertEqualGroupSummary(t, 0, 0, 3, 0, G1) + + G2 := controls.Groups[1] + assertEqualGroupSummary(t, 0, 0, 2, 0, G2) + }) +} + +func TestControls_RunChecks_Skipped(t *testing.T) { + t.Run("Should skip checks where the parent group is marked as skip", func(t *testing.T) { + // given + normalRunner := &defaultRunner{} + // and + in := []byte(` +--- +type: "master" +groups: +- id: G1 + type: skip + checks: + - id: G1/C1 +`) + controls, err := NewControls(MASTER, in, "") + assert.NoError(t, err) + + var allChecks Predicate = func(group *Group, c *Check) bool { + return true + } + emptySkipList := make(map[string]bool, 0) + controls.RunChecks(normalRunner, allChecks, emptySkipList) + + G1 := controls.Groups[0] + assertEqualGroupSummary(t, 0, 0, 1, 0, G1) + }) +} + +func TestControls_RunChecks(t *testing.T) { + t.Run("Should run checks matching the filter and update summaries", func(t *testing.T) { + // given + runner := new(mockRunner) + // and + in := []byte(` +--- +type: "master" +groups: +- id: G1 
+ checks: + - id: G1/C1 +- id: G2 + checks: + - id: G2/C1 + text: "Verify that the SomeSampleFlag argument is set to true" + audit: "grep -B1 SomeSampleFlag=true /this/is/a/file/path" + tests: + test_items: + - flag: "SomeSampleFlag=true" + compare: + op: has + value: "true" + set: true + remediation: | + Edit the config file /this/is/a/file/path and set SomeSampleFlag to true. + scored: true +`) + // and + controls, err := NewControls(MASTER, in, "") + assert.NoError(t, err) + // and + runner.On("Run", controls.Groups[0].Checks[0]).Return(PASS) + runner.On("Run", controls.Groups[1].Checks[0]).Return(FAIL) + // and + var runAll Predicate = func(group *Group, c *Check) bool { + return true + } + var emptySkipList = make(map[string]bool, 0) + // when + controls.RunChecks(runner, runAll, emptySkipList) + // then + assert.Equal(t, 2, len(controls.Groups)) + // and + G1 := controls.Groups[0] + assert.Equal(t, "G1", G1.ID) + assert.Equal(t, "G1/C1", G1.Checks[0].ID) + assertEqualGroupSummary(t, 1, 0, 0, 0, G1) + // and + G2 := controls.Groups[1] + assert.Equal(t, "G2", G2.ID) + assert.Equal(t, "G2/C1", G2.Checks[0].ID) + assert.Equal(t, "has", G2.Checks[0].Tests.TestItems[0].Compare.Op) + assert.Equal(t, "true", G2.Checks[0].Tests.TestItems[0].Compare.Value) + assert.Equal(t, true, G2.Checks[0].Tests.TestItems[0].Set) + assert.Equal(t, "SomeSampleFlag=true", G2.Checks[0].Tests.TestItems[0].Flag) + assert.Equal(t, "Edit the config file /this/is/a/file/path and set SomeSampleFlag to true.\n", G2.Checks[0].Remediation) + assert.Equal(t, true, G2.Checks[0].Scored) + assertEqualGroupSummary(t, 0, 1, 0, 0, G2) + // and + assert.Equal(t, 1, controls.Summary.Pass) + assert.Equal(t, 1, controls.Summary.Fail) + assert.Equal(t, 0, controls.Summary.Info) + assert.Equal(t, 0, controls.Summary.Warn) + // and + runner.AssertExpectations(t) + }) +} + +func TestControls_JUnitIncludesJSON(t *testing.T) { + testCases := []struct { + desc string + input *Controls + expect []byte + }{ + { + desc: "Serializes to junit", + input: &Controls{ + Groups: []*Group{ + { + ID: "g1", + Checks: []*Check{ + {ID: "check1id", Text: "check1text", State: PASS}, + }, + }, + }, + }, + expect: []byte(` + + {"test_number":"check1id","test_desc":"check1text","audit":"","AuditEnv":"","AuditConfig":"","type":"","remediation":"","test_info":null,"status":"PASS","actual_value":"","scored":false,"IsMultiple":false,"expected_result":""} + +`), + }, { + desc: "Summary values come from summary not checks", + input: &Controls{ + Summary: Summary{ + Fail: 99, + Pass: 100, + Warn: 101, + Info: 102, + }, + Groups: []*Group{ + { + ID: "g1", + Checks: []*Check{ + {ID: "check1id", Text: "check1text", State: PASS}, + }, + }, + }, + }, + expect: []byte(` + + {"test_number":"check1id","test_desc":"check1text","audit":"","AuditEnv":"","AuditConfig":"","type":"","remediation":"","test_info":null,"status":"PASS","actual_value":"","scored":false,"IsMultiple":false,"expected_result":""} + +`), + }, { + desc: "Warn and Info are considered skips and failed tests properly reported", + input: &Controls{ + Groups: []*Group{ + { + ID: "g1", + Checks: []*Check{ + {ID: "check1id", Text: "check1text", State: PASS}, + {ID: "check2id", Text: "check2text", State: INFO}, + {ID: "check3id", Text: "check3text", State: WARN}, + {ID: "check4id", Text: "check4text", State: FAIL}, + }, + }, + }, + }, + expect: []byte(` + + 
{"test_number":"check1id","test_desc":"check1text","audit":"","AuditEnv":"","AuditConfig":"","type":"","remediation":"","test_info":null,"status":"PASS","actual_value":"","scored":false,"IsMultiple":false,"expected_result":""} + + + + {"test_number":"check2id","test_desc":"check2text","audit":"","AuditEnv":"","AuditConfig":"","type":"","remediation":"","test_info":null,"status":"INFO","actual_value":"","scored":false,"IsMultiple":false,"expected_result":""} + + + + {"test_number":"check3id","test_desc":"check3text","audit":"","AuditEnv":"","AuditConfig":"","type":"","remediation":"","test_info":null,"status":"WARN","actual_value":"","scored":false,"IsMultiple":false,"expected_result":""} + + + + {"test_number":"check4id","test_desc":"check4text","audit":"","AuditEnv":"","AuditConfig":"","type":"","remediation":"","test_info":null,"status":"FAIL","actual_value":"","scored":false,"IsMultiple":false,"expected_result":""} + +`), + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + junitBytes, err := tc.input.JUnit() + if err != nil { + t.Fatalf("Failed to serialize to JUnit: %v", err) + } + + var out reporters.JUnitTestSuite + if err := xml.Unmarshal(junitBytes, &out); err != nil { + t.Fatalf("Unable to deserialize from resulting JUnit: %v", err) + } + + // Check that each check was serialized as json and stored as systemOut. + for iGroup, group := range tc.input.Groups { + for iCheck, check := range group.Checks { + jsonBytes, err := json.Marshal(check) + if err != nil { + t.Fatalf("Failed to serialize to JUnit: %v", err) + } + + if out.TestCases[iGroup*iCheck+iCheck].SystemOut != string(jsonBytes) { + t.Errorf("Expected\n\t%v\n\tbut got\n\t%v", + out.TestCases[iGroup*iCheck+iCheck].SystemOut, + string(jsonBytes), + ) + } + } + } + + if !bytes.Equal(junitBytes, tc.expect) { + t.Errorf("Expected\n\t%v\n\tbut got\n\t%v", + string(tc.expect), + string(junitBytes), + ) + } + }) + } +} + +func assertEqualGroupSummary(t *testing.T, pass, fail, info, warn int, actual *Group) { + t.Helper() + assert.Equal(t, pass, actual.Pass) + assert.Equal(t, fail, actual.Fail) + assert.Equal(t, info, actual.Info) + assert.Equal(t, warn, actual.Warn) +} diff --git a/cmd/kvisor/kubebench/check/data b/cmd/kvisor/kubebench/check/data new file mode 100644 index 00000000..fa3c2fe7 --- /dev/null +++ b/cmd/kvisor/kubebench/check/data @@ -0,0 +1,736 @@ +--- +controls: +id: 1 +text: "Test Checks" +type: "master" +groups: +- id: 1.1 + text: "First Group" + checks: + - id: 0 + text: "flag is set" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--allow-privileged" + set: true + + - id: 1 + text: "flag is not set" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--basic-auth" + set: false + + - id: 2 + text: "flag value is set to some value" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--insecure-port" + compare: + op: eq + value: 0 + set: true + + - id: 3 + text: "flag value is greater than or equal some number" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + + - id: 4 + text: "flag value is less than some number" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--max-backlog" + compare: + op: lt + value: 30 + set: true + + - id: 5 + text: "flag value does not have some value" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--admission-control" + compare: + op: nothave + value: AlwaysAdmit 
+ set: true + + - id: 6 + text: "test AND binary operation" + audit: "echo \"Non empty command\"" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-clientkey" + set: true + + - id: 7 + text: "test OR binary operation" + audit: "echo \"Non empty command\"" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: eq + value: 0 + set: true + - + flag: "--secure-port" + set: false + + - id: 8 + text: "test flag with arbitrary text" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "SomeValue" + set: true + + - id: 9 + text: "test permissions" + audit: "/bin/sh -c 'if test -e $config; then stat -c permissions=%a $config; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + set: true + + - id: 10 + text: "flag value includes some value in a comma-separated list, value is last in list" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--admission-control" + compare: + op: has + value: RBAC + set: true + + - id: 11 + text: "flag value includes some value in a comma-separated list, value is first in list" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--admission-control" + compare: + op: has + value: WebHook + set: true + + - id: 12 + text: "flag value includes some value in a comma-separated list, value middle of list" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--admission-control" + compare: + op: has + value: Something + set: true + + - id: 13 + text: "flag value includes some value in a comma-separated list, value only one in list" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--admission-control" + compare: + op: has + value: Something + set: true + + - id: 14 + text: "check that flag some-arg is set to some-val with ':' separator" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "some-arg" + compare: + op: eq + value: some-val + set: true + + - id: 15 + text: "jsonpath correct value on field" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + bin_op: or + test_items: + - path: "{.readOnlyPort}" + compare: + op: eq + value: 15000 + set: true + - path: "{.readOnlyPort}" + compare: + op: gte + value: 15000 + set: true + - path: "{.readOnlyPort}" + compare: + op: lte + value: 15000 + set: true + + - id: 16 + text: "jsonpath correct case-sensitive value on string field" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.stringValue}" + compare: + op: noteq + value: "None" + set: true + - path: "{.stringValue}" + compare: + op: noteq + value: "webhook,Something,RBAC" + set: true + - path: "{.stringValue}" + compare: + op: eq + value: "WebHook,Something,RBAC" + set: true + + - id: 17 + text: "jsonpath correct value on boolean field" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.trueValue}" + compare: + op: noteq + value: somethingElse + set: true + - path: "{.trueValue}" + compare: + op: noteq + value: false + set: true + - path: "{.trueValue}" + compare: + op: eq + value: true + set: true + + - id: 18 + text: "jsonpath field absent" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.notARealField}" + set: false + + - id: 19 + text: "jsonpath 
correct value on nested field" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 20 + text: "yamlpath correct value on field" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.readOnlyPort}" + compare: + op: gt + value: 14999 + set: true + + - id: 21 + text: "yamlpath field absent" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.fieldThatIsUnset}" + set: false + + - id: 22 + text: "yamlpath correct value on nested field" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 23 + text: "path on invalid json" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 24 + text: "path with broken expression" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.missingClosingBrace}" + set: true + + - id: 25 + text: "yamlpath on invalid yaml" + audit: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: "false" + set: true + + - id: 26 + text: "check regex op matches" + audit: "echo \"Non empty command\"" + audit_config: "echo \"Non empty command\"" + tests: + test_items: + - path: "{.currentMasterVersion}" + compare: + op: regex + value: '^1\.12.*$' + set: true + + - id: 27 + text: "check boolean flag with no value" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + set: true + + - id: 28 + text: "check boolean flag with false value" + audit: "echo \"Non empty command\"" + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: false + set: true + - id: 29 + text: "flag is set (via env)" + tests: + test_items: + - flag: "--allow-privileged" + env: "ALLOW_PRIVILEGED" + set: true + + - id: 30 + text: "flag is not set (via env)" + tests: + test_items: + - flag: "--basic-auth" + env: "BASIC_AUTH" + set: false + + - id: 31 + text: "flag value is set to some value (via env)" + tests: + test_items: + - flag: "--insecure-port" + env: "INSECURE_PORT" + compare: + op: eq + value: 0 + set: true + + - id: 32 + text: "flag value is greater than or equal some number (via env)" + tests: + test_items: + - flag: "--audit-log-maxage" + env: "AUDIT_LOG_MAXAGE" + compare: + op: gte + value: 30 + set: true + + - id: 33 + text: "flag value is less than some number (via env)" + tests: + test_items: + - env: "MAX_BACKLOG" + compare: + op: lt + value: 30 + set: true + +- id: 2.1 + text: "audit and audit_config commands" + checks: + - id: 0 + text: "audit finds flag and passes, audit_config doesn't exist -> pass" + audit: "echo flag=correct" + tests: + test_items: + - flag: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 1 + text: "audit finds flag and fails, audit_config doesn't exist -> fail" + audit: "echo flag=wrong" + tests: + test_items: + - flag: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 
2 + text: "audit doesn't find flag, audit_config doesn't exist -> fail" + audit: "echo somethingElse=correct" + tests: + test_items: + - flag: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 3 + text: "audit doesn't find flag, audit_config has correct setting -> pass" + audit: "echo somethingElse=correct" + audit_config: "echo 'flag: correct'" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 4 + text: "audit doesn't find flag, audit_config has wrong setting -> fail" + audit: "echo somethingElse=correct" + audit_config: "echo 'flag: wrong'" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 5 + text: "audit finds correct flag, audit_config has wrong setting -> pass" + audit: "echo flag=correct" + audit_config: "echo 'flag: wrong'" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 6 + text: "neither audit nor audit_config has correct setting -> fail" + audit: "echo flag=wrong" + audit_config: "echo 'flag: wrong'" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 7 + text: "audit isn't present, superfluous flag field,audit_config is correct -> pass" + audit_config: "echo 'flag: correct'" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 8 + text: "audit isn't present, superfluous flag field,audit_config is wrong -> fail" + audit_config: "echo 'flag: wrong'" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 9 + text: "test use_multiple_values is correct -> pass" + audit: "printf 'permissions=600\npermissions=600\npermissions=600'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + scored: true + - id: 10 + text: "test use_multiple_values is wrong -> fail" + audit: "printf 'permissions=600\npermissions=600\npermissions=644'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + scored: true + - id: 11 + text: "test use_multiple_values include empty value -> fail" + audit: "printf 'permissions=600\n\npermissions=600'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + scored: true + - id: 12 + text: "audit is present and wrong, audit_config is right -> fail (command line parameters override config file)" + audit: "echo flag=wrong" + audit_config: "echo 'flag: correct'" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 13 + text: "parameter and config file don't have same default - parameter has failing value" + audit: "echo '--read-only-port=1'" + audit_config: "echo 'readOnlyPort: 0'" + tests: + bin_op: and + test_items: + - flag: "--read-only-port" + path: "{.readOnlyPort}" + set: true + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + scored: true + - id: 14 + text: "parameter and config file don't have same default - config file has failing value" + audit: "echo ''" + audit_config: "echo 'readOnlyPort: 1'" + tests: + 
bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + scored: true + - id: 15 + text: "parameter and config file don't have same default - passing" + audit: "echo ''" + audit_config: "echo ''" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + scored: true + - id: 16 + text: "parameter and config file don't have same default - parameter has bad value and config is not present - failing" + audit: "echo '--read-only-port=1'" + audit_config: "echo ''" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + scored: true + +- id: 3.1 + text: "audit_env commands" + checks: + - id: 0 + text: "audit fails to find flag, audit_env finds flag -> pass" + audit: "echo in=incorrect" + audit_env: "echo flag=correct" + tests: + test_items: + - flag: "flag" + env: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 1 + text: "audit fails to find flag, audit_env finds flag and fails -> fail" + audit: "echo in=wrong" + audit_env: "echo flag=wrong" + tests: + test_items: + - flag: "flag" + env: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 2 + text: "audit finds correct flag, audit_env is incorrect -> pass" + audit: "echo flag=correct" + audit_env: "echo flag=incorrect" + tests: + test_items: + - flag: "flag" + env: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 3 + text: "audit doesn't flag flag, audit_config finds it and passes, audit_env is not present -> pass" + audit: "echo in=correct" + audit_config: "echo 'flag: correct'" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 4 + text: "audit doesn't flag flag, audit_config doesn't find flag, audit_env finds and passes -> pass" + audit: "echo in=correct" + audit_config: "echo 'in: correct'" + audit_env: "echo flag=correct" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + env: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 5 + text: "audit doesn't find flag, audit_config doesn't find flag, audit_env finds and fails -> fails" + audit: "echo in=correct" + audit_config: "echo 'in: correct'" + audit_env: "echo flag=incorrect" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + env: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true + - id: 6 + text: "audit finds flag and fails, audit_config finds flag and fails, audit_env finds and passes -> fails" + audit: "echo flag=incorrect" + audit_config: "echo 'flag: incorrect'" + audit_env: "echo flag=correct" + tests: + test_items: + - flag: "flag" + path: "{.flag}" + env: "flag" + compare: + op: eq + value: "correct" + set: true + scored: true diff --git a/cmd/kvisor/kubebench/check/test.go b/cmd/kvisor/kubebench/check/test.go new file mode 100644 index 00000000..2aee9b0b --- /dev/null +++ b/cmd/kvisor/kubebench/check/test.go @@ -0,0 +1,446 @@ +// Copyright © 2017 Aqua Security Software Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package check
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/golang/glog"
+	"gopkg.in/yaml.v2"
+	"k8s.io/client-go/util/jsonpath"
+)
+
+// test:
+// flag: OPTION
+// set: (true|false)
+// compare:
+// op: (eq|noteq|gt|gte|lt|lte|has|nothave|regex|valid_elements|bitmask)
+// value: val
+
+type binOp string
+
+const (
+	and                   binOp = "and"
+	or                          = "or"
+	defaultArraySeparator       = ","
+)
+
+type tests struct {
+	TestItems []*testItem `yaml:"test_items"`
+	BinOp     binOp       `yaml:"bin_op"`
+}
+
+type AuditUsed string
+
+const (
+	AuditCommand AuditUsed = "auditCommand"
+	AuditConfig  AuditUsed = "auditConfig"
+	AuditEnv     AuditUsed = "auditEnv"
+)
+
+type testItem struct {
+	Flag             string
+	Env              string
+	Path             string
+	Output           string
+	Value            string
+	Set              bool
+	Compare          compare
+	isMultipleOutput bool
+	auditUsed        AuditUsed
+}
+
+type (
+	envTestItem  testItem
+	pathTestItem testItem
+	flagTestItem testItem
+)
+
+type compare struct {
+	Op    string
+	Value string
+}
+
+type testOutput struct {
+	testResult     bool
+	flagFound      bool
+	actualResult   string
+	ExpectedResult string
+}
+
+func failTestItem(s string) *testOutput {
+	return &testOutput{testResult: false, actualResult: s}
+}
+
+func (t testItem) value() string {
+	if t.auditUsed == AuditConfig {
+		return t.Path
+	}
+
+	if t.auditUsed == AuditEnv {
+		return t.Env
+	}
+
+	return t.Flag
+}
+
+func (t testItem) findValue(s string) (match bool, value string, err error) {
+	if t.auditUsed == AuditEnv {
+		et := envTestItem(t)
+		return et.findValue(s)
+	}
+
+	if t.auditUsed == AuditConfig {
+		pt := pathTestItem(t)
+		return pt.findValue(s)
+	}
+
+	ft := flagTestItem(t)
+	return ft.findValue(s)
+}
+
+func (t flagTestItem) findValue(s string) (match bool, value string, err error) {
+	if s == "" || t.Flag == "" {
+		return
+	}
+	match = strings.Contains(s, t.Flag)
+	if match {
+		// Expects flags in the form:
+		// --flag=somevalue
+		// flag: somevalue
+		// --flag
+		// somevalue
+		// DOESN'T COVER - use pathTestItem implementation of findValue() for this
+		// flag:
+		// - webhook
+		pttn := `(` + t.Flag + `)(=|: *)*([^\s]*) *`
+		flagRe := regexp.MustCompile(pttn)
+		vals := flagRe.FindStringSubmatch(s)
+
+		if len(vals) > 0 {
+			if vals[3] != "" {
+				value = vals[3]
+			} else {
+				// --bool-flag
+				if strings.HasPrefix(t.Flag, "--") {
+					value = "true"
+				} else {
+					value = vals[1]
+				}
+			}
+		} else {
+			err = fmt.Errorf("invalid flag in testItem definition: %s", s)
+		}
+	}
+	glog.V(3).Infof("In flagTestItem.findValue %s", value)
+
+	return match, value, err
+}
+
+func (t pathTestItem) findValue(s string) (match bool, value string, err error) {
+	var jsonInterface interface{}
+
+	err = unmarshal(s, &jsonInterface)
+	if err != nil {
+		return false, "", fmt.Errorf("failed to load YAML or JSON from input \"%s\": %v", s, err)
+	}
+
+	value, err = executeJSONPath(t.Path, &jsonInterface)
+	if err != nil {
+		return false, "", fmt.Errorf("unable to parse path expression \"%s\": %v", t.Path, err)
+	}
+
+	glog.V(3).Infof("In pathTestItem.findValue %s", value)
+	match = value != ""
+	return match, value, err
+}
+
+func (t envTestItem) findValue(s string) (match bool, value string, err error) {
+	if s != "" && t.Env != "" {
+		r, _ := regexp.Compile(fmt.Sprintf("%s=.*(?:$|\\n)", t.Env))
+		out := r.FindString(s)
+		out = strings.Replace(out, "\n", "", 1)
+		out = strings.Replace(out, fmt.Sprintf("%s=", t.Env), "", 1)
+
+		if len(out) > 0 {
+			match = true
+			value = out
+		} else {
+			match = false
+			value = ""
+		}
+	}
+	glog.V(3).Infof("In envTestItem.findValue %s", value)
+	return match, value, nil
+}
+
+func (t testItem) execute(s string) *testOutput {
+	result := &testOutput{}
+	s = strings.TrimRight(s, " \n")
+
+	// If the test has output that should be evaluated for each row
+	var output []string
+	if t.isMultipleOutput {
+		output = strings.Split(s, "\n")
+	} else {
+		output = []string{s}
+	}
+
+	for _, op := range output {
+		result = t.evaluate(op)
+		// If the test failed for the current row, no need to keep testing for this output
+		if !result.testResult {
+			break
+		}
+	}
+
+	result.actualResult = s
+	return result
+}
+
+func (t testItem) evaluate(s string) *testOutput {
+	result := &testOutput{}
+
+	match, value, err := t.findValue(s)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "%s", err.Error())
+		return failTestItem(err.Error())
+	}
+
+	if t.Set {
+		if match && t.Compare.Op != "" {
+			result.ExpectedResult, result.testResult = compareOp(t.Compare.Op, value, t.Compare.Value, t.value())
+		} else {
+			result.ExpectedResult = fmt.Sprintf("'%s' is present", t.value())
+			result.testResult = match
+		}
+	} else {
+		result.ExpectedResult = fmt.Sprintf("'%s' is not present", t.value())
+		result.testResult = !match
+	}
+
+	result.flagFound = match
+	isExist := "exists"
+	if !result.flagFound {
+		isExist = "does not exist"
+	}
+	switch t.auditUsed {
+	case AuditCommand:
+		glog.V(3).Infof("Flag '%s' %s", t.Flag, isExist)
+	case AuditConfig:
+		glog.V(3).Infof("Path '%s' %s", t.Path, isExist)
+	case AuditEnv:
+		glog.V(3).Infof("Env '%s' %s", t.Env, isExist)
+	default:
+		glog.V(3).Infof("Unable to identify audit type used: %s", t.auditUsed)
+	}
+
+	return result
+}
+
+func compareOp(tCompareOp string, flagVal string, tCompareValue string, flagName string) (string, bool) {
+	expectedResultPattern := ""
+	testResult := false
+
+	switch tCompareOp {
+	case "eq":
+		expectedResultPattern = "'%s' is equal to '%s'"
+		value := strings.ToLower(flagVal)
+		// Do case-insensitive comparison for booleans ...
+		if value == "false" || value == "true" {
+			testResult = value == tCompareValue
+		} else {
+			testResult = flagVal == tCompareValue
+		}
+
+	case "noteq":
+		expectedResultPattern = "'%s' is not equal to '%s'"
+		value := strings.ToLower(flagVal)
+		// Do case-insensitive comparison for booleans ...
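+		// (boolean-looking values are lowercased first, so "True" and "true"
+		// compare equal; any other string is compared verbatim)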
+ if value == "false" || value == "true" { + testResult = !(value == tCompareValue) + } else { + testResult = !(flagVal == tCompareValue) + } + + case "gt", "gte", "lt", "lte": + a, b, err := toNumeric(flagVal, tCompareValue) + if err != nil { + expectedResultPattern = "Invalid Number(s) used for comparison: '%s' '%s'" + glog.V(1).Infof(fmt.Sprintf("Not numeric value - flag: %q - compareValue: %q %v\n", flagVal, tCompareValue, err)) + return fmt.Sprintf(expectedResultPattern, flagVal, tCompareValue), false + } + switch tCompareOp { + case "gt": + expectedResultPattern = "'%s' is greater than %s" + testResult = a > b + + case "gte": + expectedResultPattern = "'%s' is greater or equal to %s" + testResult = a >= b + + case "lt": + expectedResultPattern = "'%s' is lower than %s" + testResult = a < b + + case "lte": + expectedResultPattern = "'%s' is lower or equal to %s" + testResult = a <= b + } + + case "has": + expectedResultPattern = "'%s' has '%s'" + testResult = strings.Contains(flagVal, tCompareValue) + + case "nothave": + expectedResultPattern = "'%s' does not have '%s'" + testResult = !strings.Contains(flagVal, tCompareValue) + + case "regex": + expectedResultPattern = "'%s' matched by regex expression '%s'" + opRe := regexp.MustCompile(tCompareValue) + testResult = opRe.MatchString(flagVal) + + case "valid_elements": + expectedResultPattern = "'%s' contains valid elements from '%s'" + s := splitAndRemoveLastSeparator(flagVal, defaultArraySeparator) + target := splitAndRemoveLastSeparator(tCompareValue, defaultArraySeparator) + testResult = allElementsValid(s, target) + + case "bitmask": + expectedResultPattern = "%s has permissions " + flagVal + ", expected %s or more restrictive" + requested, err := strconv.ParseInt(flagVal, 8, 64) + if err != nil { + glog.V(1).Infof(fmt.Sprintf("Not numeric value - flag: %q - compareValue: %q %v\n", flagVal, tCompareValue, err)) + return fmt.Sprintf("Not numeric value - flag: %s", flagVal), false + } + max, err := strconv.ParseInt(tCompareValue, 8, 64) + if err != nil { + glog.V(1).Infof(fmt.Sprintf("Not numeric value - flag: %q - compareValue: %q %v\n", flagVal, tCompareValue, err)) + return fmt.Sprintf("Not numeric value - flag: %s", tCompareValue), false + } + testResult = (max & requested) == requested + } + if expectedResultPattern == "" { + return expectedResultPattern, testResult + } + + return fmt.Sprintf(expectedResultPattern, flagName, tCompareValue), testResult +} + +func unmarshal(s string, jsonInterface *interface{}) error { + data := []byte(s) + err := json.Unmarshal(data, jsonInterface) + if err != nil { + err := yaml.Unmarshal(data, jsonInterface) + if err != nil { + return err + } + } + return nil +} + +func executeJSONPath(path string, jsonInterface interface{}) (string, error) { + j := jsonpath.New("jsonpath") + j.AllowMissingKeys(true) + err := j.Parse(path) + if err != nil { + return "", err + } + + buf := new(bytes.Buffer) + err = j.Execute(buf, jsonInterface) + if err != nil { + return "", err + } + jsonpathResult := buf.String() + return jsonpathResult, nil +} + +func allElementsValid(s, t []string) bool { + sourceEmpty := len(s) == 0 + targetEmpty := len(t) == 0 + + if sourceEmpty && targetEmpty { + return true + } + + // XOR comparison - + // if either value is empty and the other is not empty, + // not all elements are valid + if (sourceEmpty || targetEmpty) && !(sourceEmpty && targetEmpty) { + return false + } + + for _, sv := range s { + found := false + for _, tv := range t { + if sv == tv { + found = true + break + } 
+ } + if !found { + return false + } + } + return true +} + +func splitAndRemoveLastSeparator(s, sep string) []string { + cleanS := strings.TrimRight(strings.TrimSpace(s), sep) + if len(cleanS) == 0 { + return []string{} + } + + ts := strings.Split(cleanS, sep) + for i := range ts { + ts[i] = strings.TrimSpace(ts[i]) + } + + return ts +} + +func toNumeric(a, b string) (c, d int, err error) { + c, err = strconv.Atoi(strings.TrimSpace(a)) + if err != nil { + return -1, -1, fmt.Errorf("toNumeric - error converting %s: %s", a, err) + } + d, err = strconv.Atoi(strings.TrimSpace(b)) + if err != nil { + return -1, -1, fmt.Errorf("toNumeric - error converting %s: %s", b, err) + } + + return c, d, nil +} + +func (t *testItem) UnmarshalYAML(unmarshal func(interface{}) error) error { + type buildTest testItem + + // Make Set parameter to be true by default. + newTestItem := buildTest{Set: true} + err := unmarshal(&newTestItem) + if err != nil { + return err + } + *t = testItem(newTestItem) + return nil +} diff --git a/cmd/kvisor/kubebench/check/test_test.go b/cmd/kvisor/kubebench/check/test_test.go new file mode 100644 index 00000000..29eb2692 --- /dev/null +++ b/cmd/kvisor/kubebench/check/test_test.go @@ -0,0 +1,1408 @@ +// Copyright © 2017-2020 Aqua Security Software Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
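+
+// The tests below exercise the flag/path/env matching and compareOp logic
+// from test.go against the fixture controls file in ./data, which init()
+// loads with "$user" substituted from the environment.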
+ +package check + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "testing" +) + +var ( + in []byte + controls *Controls +) + +func init() { + var err error + in, err = ioutil.ReadFile("data") + if err != nil { + panic("Failed reading test data: " + err.Error()) + } + + // substitute variables in data file + user := os.Getenv("USER") + s := strings.Replace(string(in), "$user", user, -1) + + controls, err = NewControls(MASTER, []byte(s), "") + // controls, err = NewControls(MASTER, in) + if err != nil { + panic("Failed creating test controls: " + err.Error()) + } +} + +func TestTestExecute(t *testing.T) { + cases := []struct { + check *Check + str string + strConfig string + expectedTestResult string + strEnv string + }{ + { + check: controls.Groups[0].Checks[0], + str: "2:45 ../kubernetes/kube-apiserver --allow-privileged=false --option1=20,30,40", + strConfig: "", + expectedTestResult: "'--allow-privileged' is present", + }, + { + check: controls.Groups[0].Checks[1], + str: "2:45 ../kubernetes/kube-apiserver --allow-privileged=false", + strConfig: "", + expectedTestResult: "'--basic-auth' is not present", + }, + { + check: controls.Groups[0].Checks[2], + str: "niinai 13617 2635 99 19:26 pts/20 00:03:08 ./kube-apiserver --insecure-port=0 --anonymous-auth", + strConfig: "", + expectedTestResult: "'--insecure-port' is equal to '0'", + }, + { + check: controls.Groups[0].Checks[3], + str: "2:45 ../kubernetes/kube-apiserver --secure-port=0 --audit-log-maxage=40 --option", + strConfig: "", + expectedTestResult: "'--audit-log-maxage' is greater or equal to 30", + }, + { + check: controls.Groups[0].Checks[4], + str: "2:45 ../kubernetes/kube-apiserver --max-backlog=20 --secure-port=0 --audit-log-maxage=40 --option", + strConfig: "", + expectedTestResult: "'--max-backlog' is lower than 30", + }, + { + check: controls.Groups[0].Checks[5], + str: "2:45 ../kubernetes/kube-apiserver --option --admission-control=WebHook,RBAC ---audit-log-maxage=40", + strConfig: "", + expectedTestResult: "'--admission-control' does not have 'AlwaysAdmit'", + }, + { + check: controls.Groups[0].Checks[6], + str: "2:45 .. --kubelet-clientkey=foo --kubelet-client-certificate=bar --admission-control=Webhook,RBAC", + strConfig: "", + expectedTestResult: "'--kubelet-client-certificate' is present AND '--kubelet-clientkey' is present", + }, + { + check: controls.Groups[0].Checks[7], + str: "2:45 .. 
--secure-port=0 --kubelet-client-certificate=bar --admission-control=Webhook,RBAC", + strConfig: "", + expectedTestResult: "'--secure-port' is equal to '0' OR '--secure-port' is not present", + }, + { + check: controls.Groups[0].Checks[8], + str: "permissions=SomeValue", + strConfig: "", + expectedTestResult: "'permissions' is equal to 'SomeValue'", + }, + { + check: controls.Groups[0].Checks[9], + str: "permissions=640", + strConfig: "", + expectedTestResult: "permissions has permissions 640, expected 644 or more restrictive", + }, + { + check: controls.Groups[0].Checks[9], + str: "permissions=600", + strConfig: "", + expectedTestResult: "permissions has permissions 600, expected 644 or more restrictive", + }, + { + check: controls.Groups[0].Checks[10], + str: "2:45 ../kubernetes/kube-apiserver --option --admission-control=WebHook,RBAC ---audit-log-maxage=40", + strConfig: "", + expectedTestResult: "'--admission-control' has 'RBAC'", + }, + { + check: controls.Groups[0].Checks[11], + str: "2:45 ../kubernetes/kube-apiserver --option --admission-control=WebHook,RBAC ---audit-log-maxage=40", + strConfig: "", + expectedTestResult: "'--admission-control' has 'WebHook'", + }, + { + check: controls.Groups[0].Checks[12], + str: "2:45 ../kubernetes/kube-apiserver --option --admission-control=WebHook,Something,RBAC ---audit-log-maxage=40", + strConfig: "", + expectedTestResult: "'--admission-control' has 'Something'", + }, + { + check: controls.Groups[0].Checks[13], + str: "2:45 ../kubernetes/kube-apiserver --option --admission-control=Something ---audit-log-maxage=40", + strConfig: "", + expectedTestResult: "'--admission-control' has 'Something'", + }, + { + // check for ':' as argument-value separator, with space between arg and val + check: controls.Groups[0].Checks[14], + str: "2:45 kube-apiserver some-arg: some-val --admission-control=Something ---audit-log-maxage=40", + strConfig: "", + expectedTestResult: "'some-arg' is equal to 'some-val'", + }, + { + // check for ':' as argument-value separator, with no space between arg and val + check: controls.Groups[0].Checks[14], + str: "2:45 kube-apiserver some-arg:some-val --admission-control=Something ---audit-log-maxage=40", + strConfig: "", + expectedTestResult: "'some-arg' is equal to 'some-val'", + }, + { + check: controls.Groups[0].Checks[15], + str: "", + strConfig: "{\"readOnlyPort\": 15000}", + expectedTestResult: "'{.readOnlyPort}' is equal to '15000' OR '{.readOnlyPort}' is greater or equal to 15000 OR '{.readOnlyPort}' is lower or equal to 15000", + }, + { + check: controls.Groups[0].Checks[16], + str: "", + strConfig: "{\"stringValue\": \"WebHook,Something,RBAC\"}", + expectedTestResult: "'{.stringValue}' is not equal to 'None' AND '{.stringValue}' is not equal to 'webhook,Something,RBAC' AND '{.stringValue}' is equal to 'WebHook,Something,RBAC'", + }, + { + check: controls.Groups[0].Checks[17], + str: "", + strConfig: "{\"trueValue\": true}", + expectedTestResult: "'{.trueValue}' is not equal to 'somethingElse' AND '{.trueValue}' is not equal to 'false' AND '{.trueValue}' is equal to 'true'", + }, + { + check: controls.Groups[0].Checks[18], + str: "", + strConfig: "{\"readOnlyPort\": 15000}", + expectedTestResult: "'{.notARealField}' is not present", + }, + { + check: controls.Groups[0].Checks[19], + str: "", + strConfig: "{\"authentication\": { \"anonymous\": {\"enabled\": false}}}", + expectedTestResult: "'{.authentication.anonymous.enabled}' is equal to 'false'", + }, + { + check: controls.Groups[0].Checks[20], + str: "", + 
strConfig: "readOnlyPort: 15000", + expectedTestResult: "'{.readOnlyPort}' is greater than 14999", + }, + { + check: controls.Groups[0].Checks[21], + str: "", + strConfig: "readOnlyPort: 15000", + expectedTestResult: "'{.fieldThatIsUnset}' is not present", + }, + { + check: controls.Groups[0].Checks[22], + str: "", + strConfig: "authentication:\n anonymous:\n enabled: false", + expectedTestResult: "'{.authentication.anonymous.enabled}' is equal to 'false'", + }, + { + check: controls.Groups[0].Checks[26], + str: "", + strConfig: "currentMasterVersion: 1.12.7", + expectedTestResult: "'{.currentMasterVersion}' matched by regex expression '^1\\.12.*$'", + }, + { + check: controls.Groups[0].Checks[27], + str: "--peer-client-cert-auth", + strConfig: "", + expectedTestResult: "'--peer-client-cert-auth' is equal to 'true'", + }, + { + check: controls.Groups[0].Checks[27], + str: "--abc=true --peer-client-cert-auth --efg=false", + strConfig: "", + expectedTestResult: "'--peer-client-cert-auth' is equal to 'true'", + }, + { + check: controls.Groups[0].Checks[27], + str: "--abc --peer-client-cert-auth --efg", + strConfig: "", + expectedTestResult: "'--peer-client-cert-auth' is equal to 'true'", + }, + { + check: controls.Groups[0].Checks[27], + str: "--peer-client-cert-auth=true", + strConfig: "", + expectedTestResult: "'--peer-client-cert-auth' is equal to 'true'", + }, + { + check: controls.Groups[0].Checks[27], + str: "--abc --peer-client-cert-auth=true --efg", + strConfig: "", + expectedTestResult: "'--peer-client-cert-auth' is equal to 'true'", + }, + { + check: controls.Groups[0].Checks[28], + str: "--abc --peer-client-cert-auth=false --efg", + strConfig: "", + expectedTestResult: "'--peer-client-cert-auth' is equal to 'false'", + }, + { + check: controls.Groups[0].Checks[29], + str: "2:45 ../kubernetes/kube-apiserver --option1=20,30,40", + strConfig: "", + expectedTestResult: "'ALLOW_PRIVILEGED' is present", + strEnv: "SOME_OTHER_ENV=true\nALLOW_PRIVILEGED=false", + }, + { + check: controls.Groups[0].Checks[30], + str: "2:45 ../kubernetes/kube-apiserver --option1=20,30,40", + strConfig: "", + expectedTestResult: "'BASIC_AUTH' is not present", + strEnv: "", + }, + { + check: controls.Groups[0].Checks[31], + str: "2:45 ../kubernetes/kube-apiserver --option1=20,30,40", + strConfig: "", + expectedTestResult: "'INSECURE_PORT' is equal to '0'", + strEnv: "INSECURE_PORT=0", + }, + { + check: controls.Groups[0].Checks[32], + str: "2:45 ../kubernetes/kube-apiserver --option1=20,30,40", + strConfig: "", + expectedTestResult: "'AUDIT_LOG_MAXAGE' is greater or equal to 30", + strEnv: "AUDIT_LOG_MAXAGE=40", + }, + { + check: controls.Groups[0].Checks[33], + str: "2:45 ../kubernetes/kube-apiserver --option1=20,30,40", + strConfig: "", + expectedTestResult: "'MAX_BACKLOG' is lower than 30", + strEnv: "MAX_BACKLOG=20", + }, + } + + for _, c := range cases { + t.Run(c.check.Text, func(t *testing.T) { + c.check.AuditOutput = c.str + c.check.AuditConfigOutput = c.strConfig + c.check.AuditEnvOutput = c.strEnv + res, err := c.check.execute() + if err != nil { + t.Errorf(err.Error()) + } + if !res.testResult { + t.Errorf("Test ID %v - expected:%v, got:%v", c.check.ID, true, res) + } + if res.ExpectedResult != c.expectedTestResult { + t.Errorf("Test ID %v - \nexpected:%v, \ngot: %v", c.check.ID, c.expectedTestResult, res.ExpectedResult) + } + }) + } +} + +func TestTestExecuteExceptions(t *testing.T) { + cases := []struct { + *Check + str string + }{ + { + controls.Groups[0].Checks[23], + "this is not valid json {} 
at all", + }, + { + controls.Groups[0].Checks[24], + "{\"key\": \"value\"}", + }, + { + controls.Groups[0].Checks[25], + "broken } yaml\nenabled: true", + }, + { + controls.Groups[0].Checks[26], + "currentMasterVersion: 1.11", + }, + { + controls.Groups[0].Checks[26], + "currentMasterVersion: ", + }, + } + + for _, c := range cases { + t.Run(c.Text, func(t *testing.T) { + c.Check.AuditConfigOutput = c.str + res, err := c.Check.execute() + if err != nil { + t.Errorf(err.Error()) + } + if res.testResult { + t.Errorf("expected:%v, got:%v", false, res) + } + }) + } +} + +func TestTestUnmarshal(t *testing.T) { + type kubeletConfig struct { + Kind string + ApiVersion string + Address string + } + cases := []struct { + content string + jsonInterface interface{} + expectedToFail bool + }{ + { + `{ + "kind": "KubeletConfiguration", + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "address": "0.0.0.0" + } + `, + kubeletConfig{}, + false, + }, + { + ` +kind: KubeletConfiguration +address: 0.0.0.0 +apiVersion: kubelet.config.k8s.io/v1beta1 +authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 2m0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt +tlsCipherSuites: + - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +`, + kubeletConfig{}, + false, + }, + { + ` +kind: ddress: 0.0.0.0 +apiVersion: kubelet.config.k8s.io/v1beta +`, + kubeletConfig{}, + true, + }, + } + + for id, c := range cases { + t.Run(fmt.Sprintf("%d", id), func(t *testing.T) { + err := unmarshal(c.content, &c.jsonInterface) + if err != nil { + if !c.expectedToFail { + t.Errorf("should pass, got error:%v", err) + } + } else { + if c.expectedToFail { + t.Errorf("should fail, but passed") + } + } + }) + } +} + +func TestExecuteJSONPath(t *testing.T) { + type kubeletConfig struct { + Kind string + ApiVersion string + Address string + } + cases := []struct { + name string + jsonPath string + jsonInterface kubeletConfig + expectedResult string + expectedToFail bool + }{ + { + "JSONPath parse works, results don't match", + "{.resourcesproviders.aescbc}", + kubeletConfig{ + Kind: "KubeletConfiguration", + ApiVersion: "kubelet.config.k8s.io/v1beta1", + Address: "127.0.0.0", + }, + "blah", + true, + }, + { + "JSONPath parse works, results match", + "{.Kind}", + kubeletConfig{ + Kind: "KubeletConfiguration", + ApiVersion: "kubelet.config.k8s.io/v1beta1", + Address: "127.0.0.0", + }, + "KubeletConfiguration", + false, + }, + { + "JSONPath parse fails", + "{.ApiVersion", + kubeletConfig{ + Kind: "KubeletConfiguration", + ApiVersion: "kubelet.config.k8s.io/v1beta1", + Address: "127.0.0.0", + }, + "", + true, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + result, err := executeJSONPath(c.jsonPath, c.jsonInterface) + if err != nil && !c.expectedToFail { + t.Fatalf("jsonPath:%q, expectedResult:%q got:%v", c.jsonPath, c.expectedResult, err) + } + if c.expectedResult != result && !c.expectedToFail { + t.Errorf("jsonPath:%q, expectedResult:%q got:%q", c.jsonPath, c.expectedResult, result) + } + }) + } +} + +func TestAllElementsValid(t *testing.T) { + cases := []struct { + source []string + target []string + valid bool + }{ + { + source: []string{}, + target: []string{}, + valid: true, + }, + { + source: []string{"blah"}, + target: []string{}, + valid: false, + }, + { + source: []string{}, + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", 
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, + valid: false, + }, + { + source: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}, + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, + valid: true, + }, + { + source: []string{"blah"}, + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, + valid: false, + }, + { + source: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "blah"}, + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, + valid: false, + }, + } + for id, c := range cases { + t.Run(fmt.Sprintf("%d", id), func(t *testing.T) { + if !allElementsValid(c.source, c.target) && c.valid { + t.Errorf("Not All Elements in %q are found in %q", c.source, c.target) + } + }) + } +} + +func TestSplitAndRemoveLastSeparator(t *testing.T) { + cases := []struct { + source string + valid bool + elementCnt int + }{ + { + source: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256", + valid: true, + elementCnt: 8, + }, + { + source: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,", + valid: true, + elementCnt: 2, + }, + { + source: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,", + valid: true, + elementCnt: 2, + }, + { + source: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, ", + valid: true, + elementCnt: 2, + }, + { + source: " TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,", + valid: true, + elementCnt: 2, + }, + } + + for id, c := range cases { + t.Run(fmt.Sprintf("%d", id), func(t *testing.T) { + as := splitAndRemoveLastSeparator(c.source, defaultArraySeparator) + if len(as) == 0 && c.valid { + t.Errorf("Split did not work with %q", c.source) + } + + if c.elementCnt != len(as) { + t.Errorf("Split did not work with %q expected: %d got: %d", c.source, c.elementCnt, len(as)) + } + }) + } +} + +func TestCompareOp(t *testing.T) { + cases := []struct { + label string + op string + flagVal string // Test output. + compareValue string // Flag value to compare with. + expectedResultPattern string + flagName string // Compared flag name. 
+ testResult bool + }{ + // Test Op not matching + {label: "empty - op", op: "", flagVal: "", compareValue: "", expectedResultPattern: "", testResult: false, flagName: ""}, + {label: "op=blah", op: "blah", flagVal: "foo", compareValue: "bar", expectedResultPattern: "", testResult: false, flagName: ""}, + + // Test Op "eq" + {label: "op=eq, both empty", op: "eq", flagVal: "", compareValue: "", expectedResultPattern: "'' is equal to ''", testResult: true, flagName: ""}, + + { + label: "op=eq, true==true", op: "eq", flagVal: "true", + compareValue: "true", + expectedResultPattern: "'parameterTrue' is equal to 'true'", + testResult: true, + flagName: "parameterTrue", + }, + + { + label: "op=eq, false==false", op: "eq", flagVal: "false", + compareValue: "false", + expectedResultPattern: "'parameterFalse' is equal to 'false'", + testResult: true, + flagName: "parameterFalse", + }, + + { + label: "op=eq, false==true", op: "eq", flagVal: "false", + compareValue: "true", + expectedResultPattern: "'parameterFalse' is equal to 'true'", + testResult: false, + flagName: "parameterFalse", + }, + + { + label: "op=eq, strings match", op: "eq", flagVal: "KubeletConfiguration", + compareValue: "KubeletConfiguration", + expectedResultPattern: "'--FlagNameKubeletConf' is equal to 'KubeletConfiguration'", + testResult: true, + flagName: "--FlagNameKubeletConf", + }, + + { + label: "op=eq, flagVal=empty", op: "eq", flagVal: "", + compareValue: "KubeletConfiguration", + expectedResultPattern: "'--FlagNameKubeletConf' is equal to 'KubeletConfiguration'", + testResult: false, + flagName: "--FlagNameKubeletConf", + }, + + { + label: "op=eq, compareValue=empty", + op: "eq", + flagVal: "KubeletConfiguration", + compareValue: "", + expectedResultPattern: "'--FlagNameKubeletConf' is equal to ''", + testResult: false, + flagName: "--FlagNameKubeletConf", + }, + + // Test Op "noteq" + { + label: "op=noteq, both empty", + op: "noteq", + flagVal: "", + compareValue: "", + expectedResultPattern: "'parameter' is not equal to ''", + testResult: false, + flagName: "parameter", + }, + + { + label: "op=noteq, true!=true", + op: "noteq", + flagVal: "true", + compareValue: "true", + expectedResultPattern: "'parameterTrue' is not equal to 'true'", + testResult: false, + flagName: "parameterTrue", + }, + + { + label: "op=noteq, false!=false", + op: "noteq", + flagVal: "false", + compareValue: "false", + expectedResultPattern: "'parameterFalse' is not equal to 'false'", + testResult: false, + flagName: "parameterFalse", + }, + + { + label: "op=noteq, false!=true", + op: "noteq", + flagVal: "false", + compareValue: "true", + expectedResultPattern: "'parameterFalse' is not equal to 'true'", + testResult: true, + flagName: "parameterFalse", + }, + + { + label: "op=noteq, strings match", + op: "noteq", + flagVal: "KubeletConfiguration", + compareValue: "KubeletConfiguration", + expectedResultPattern: "'--FlagNameKubeletConf' is not equal to 'KubeletConfiguration'", + testResult: false, + flagName: "--FlagNameKubeletConf", + }, + + { + label: "op=noteq, flagVal=empty", + op: "noteq", + flagVal: "", + compareValue: "KubeletConfiguration", + expectedResultPattern: "'--FlagNameKubeletConf' is not equal to 'KubeletConfiguration'", + testResult: true, + flagName: "--FlagNameKubeletConf", + }, + + { + label: "op=noteq, compareValue=empty", + op: "noteq", + flagVal: "KubeletConfiguration", + compareValue: "", + expectedResultPattern: "'--FlagNameKubeletConf' is not equal to ''", + testResult: true, + flagName: "--FlagNameKubeletConf", + }, + + 
// Test Op "gt" + { + label: "op=gt, both empty", + op: "gt", + flagVal: "", + compareValue: "", + expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", + testResult: false, + flagName: "flagName", + }, + { + label: "op=gt, 0 > 0", + op: "gt", + flagVal: "0", + compareValue: "0", expectedResultPattern: "'flagName' is greater than 0", + testResult: false, + flagName: "flagName", + }, + { + label: "op=gt, 4 > 5", + op: "gt", + flagVal: "4", + compareValue: "5", + expectedResultPattern: "'flagName' is greater than 5", + testResult: false, + flagName: "flagName", + }, + { + label: "op=gt, 5 > 4", + op: "gt", + flagVal: "5", + compareValue: "4", + expectedResultPattern: "'flagName' is greater than 4", + testResult: true, + flagName: "flagName", + }, + { + label: "op=gt, 5 > 5", + op: "gt", + flagVal: "5", + compareValue: "5", + expectedResultPattern: "'flagName' is greater than 5", + testResult: false, + flagName: "flagName", + }, + { + label: "op=gt, Pikachu > 5", + op: "gt", + flagVal: "Pikachu", + compareValue: "5", + expectedResultPattern: "Invalid Number(s) used for comparison: 'Pikachu' '5'", + testResult: false, + flagName: "flagName", + }, + { + label: "op=gt, 5 > Bulbasaur", + op: "gt", + flagVal: "5", + compareValue: "Bulbasaur", + expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Bulbasaur'", + testResult: false, + flagName: "flagName", + }, + // Test Op "lt" + { + label: "op=lt, both empty", + op: "lt", + flagVal: "", + compareValue: "", + expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", + testResult: false, + flagName: "flagName", + }, + { + label: "op=lt, 0 < 0", + op: "lt", + flagVal: "0", + compareValue: "0", + expectedResultPattern: "'flagName' is lower than 0", + testResult: false, + flagName: "flagName", + }, + { + label: "op=lt, 4 < 5", + op: "lt", + flagVal: "4", + compareValue: "5", + expectedResultPattern: "'flagName' is lower than 5", + testResult: true, + flagName: "flagName", + }, + { + label: "op=lt, 5 < 4", + op: "lt", + flagVal: "5", + compareValue: "4", + expectedResultPattern: "'flagName' is lower than 4", + testResult: false, + flagName: "flagName", + }, + { + label: "op=lt, 5 < 5", + op: "lt", + flagVal: "5", + compareValue: "5", + expectedResultPattern: "'flagName' is lower than 5", + testResult: false, + flagName: "flagName", + }, + { + label: "op=lt, Charmander < 5", + op: "lt", + flagVal: "Charmander", + compareValue: "5", + expectedResultPattern: "Invalid Number(s) used for comparison: 'Charmander' '5'", + testResult: false, + flagName: "flagName", + }, + { + label: "op=lt, 5 < Charmeleon", + op: "lt", + flagVal: "5", + compareValue: "Charmeleon", + expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Charmeleon'", + testResult: false, + flagName: "flagName", + }, + // Test Op "gte" + { + label: "op=gte, both empty", + op: "gte", + flagVal: "", + compareValue: "", + expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", + testResult: false, + flagName: "flagName", + }, + { + label: "op=gte, 0 >= 0", + op: "gte", + flagVal: "0", + compareValue: "0", + expectedResultPattern: "'flagName' is greater or equal to 0", + testResult: true, + flagName: "flagName", + }, + { + label: "op=gte, 4 >= 5", + op: "gte", + flagVal: "4", + compareValue: "5", + expectedResultPattern: "'flagName' is greater or equal to 5", + testResult: false, + flagName: "flagName", + }, + { + label: "op=gte, 5 >= 4", + op: "gte", + flagVal: "5", + compareValue: "4", + expectedResultPattern: "'flagName' is 
greater or equal to 4", + testResult: true, + flagName: "flagName", + }, + { + label: "op=gte, 5 >= 5", + op: "gte", + flagVal: "5", + compareValue: "5", + expectedResultPattern: "'flagName' is greater or equal to 5", + testResult: true, + flagName: "flagName", + }, + { + label: "op=gte, Ekans >= 5", + op: "gte", + flagVal: "Ekans", + compareValue: "5", + expectedResultPattern: "Invalid Number(s) used for comparison: 'Ekans' '5'", + testResult: false, + flagName: "flagName", + }, + { + label: "op=gte, 4 >= Zubat", + op: "gte", + flagVal: "4", + compareValue: "Zubat", + expectedResultPattern: "Invalid Number(s) used for comparison: '4' 'Zubat'", + testResult: false, + flagName: "flagName", + }, + // Test Op "lte" + { + label: "op=lte, both empty", + op: "lte", + flagVal: "", + compareValue: "", + expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", + testResult: false, + flagName: "flagName", + }, + { + label: "op=lte, 0 <= 0", + op: "lte", + flagVal: "0", + compareValue: "0", + expectedResultPattern: "'flagName' is lower or equal to 0", + testResult: true, + flagName: "flagName", + }, + { + label: "op=lte, 4 <= 5", + op: "lte", + flagVal: "4", + compareValue: "5", + expectedResultPattern: "'flagName' is lower or equal to 5", + testResult: true, + flagName: "flagName", + }, + { + label: "op=lte, 5 <= 4", + op: "lte", + flagVal: "5", + compareValue: "4", + expectedResultPattern: "'flagName' is lower or equal to 4", + testResult: false, + flagName: "flagName", + }, + { + label: "op=lte, 5 <= 5", + op: "lte", + flagVal: "5", + compareValue: "5", + expectedResultPattern: "'flagName' is lower or equal to 5", + testResult: true, + flagName: "flagName", + }, + { + label: "op=lte, Venomoth <= 4", + op: "lte", + flagVal: "Venomoth", + compareValue: "4", + expectedResultPattern: "Invalid Number(s) used for comparison: 'Venomoth' '4'", + testResult: false, + flagName: "flagName", + }, + { + label: "op=lte, 5 <= Meowth", + op: "lte", + flagVal: "5", + compareValue: "Meowth", + expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Meowth'", + testResult: false, + flagName: "flagName", + }, + + // Test Op "has" + { + label: "op=has, both empty", + op: "has", + flagVal: "", + compareValue: "", + expectedResultPattern: "'flagName' has ''", + testResult: true, + flagName: "flagName", + }, + { + label: "op=has, flagVal=empty", + op: "has", + flagVal: "", + compareValue: "blah", + expectedResultPattern: "'flagName' has 'blah'", + testResult: false, + flagName: "flagName", + }, + { + label: "op=has, compareValue=empty", + op: "has", + flagVal: "blah", + compareValue: "", + expectedResultPattern: "'flagName-blah' has ''", + testResult: true, + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'la'", + op: "has", + flagVal: "blah", + compareValue: "la", + expectedResultPattern: "'flagName-blah' has 'la'", + testResult: true, + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'LA'", + op: "has", + flagVal: "blah", + compareValue: "LA", + expectedResultPattern: "'flagName-blah' has 'LA'", + testResult: false, + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'lo'", + op: "has", + flagVal: "blah", + compareValue: "lo", + expectedResultPattern: "'flagName-blah' has 'lo'", + testResult: false, + flagName: "flagName-blah", + }, + + // Test Op "nothave" + { + label: "op=nothave, both empty", + op: "nothave", + flagVal: "", + compareValue: "", + expectedResultPattern: "'flagName' does not have ''", + testResult: false, + flagName: 
"flagName", + }, + { + label: "op=nothave, flagVal=empty", + op: "nothave", + flagVal: "", + compareValue: "blah", + expectedResultPattern: "'flagName' does not have 'blah'", + testResult: true, + flagName: "flagName", + }, + { + label: "op=nothave, compareValue=empty", + op: "nothave", + flagVal: "blah", + compareValue: "", + expectedResultPattern: "'flagName-blah' does not have ''", + testResult: false, + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'la'", + op: "nothave", + flagVal: "blah", + compareValue: "la", + expectedResultPattern: "'flagName-blah' does not have 'la'", + testResult: false, + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'LA'", + op: "nothave", + flagVal: "blah", + compareValue: "LA", + expectedResultPattern: "'flagName-blah' does not have 'LA'", + testResult: true, + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'lo'", + op: "nothave", + flagVal: "blah", + compareValue: "lo", + expectedResultPattern: "'flagName-blah' does not have 'lo'", + testResult: true, + flagName: "flagName-blah", + }, + + // Test Op "regex" + { + label: "op=regex, both empty", + op: "regex", + flagVal: "", + compareValue: "", + expectedResultPattern: "'flagName' matched by regex expression ''", + testResult: true, + flagName: "flagName", + }, + { + label: "op=regex, flagVal=empty", + op: "regex", + flagVal: "", + compareValue: "blah", + expectedResultPattern: "'flagName' matched by regex expression 'blah'", + testResult: false, + flagName: "flagName", + }, + + // Test Op "valid_elements" + { + label: "op=valid_elements, valid_elements both empty", + op: "valid_elements", + flagVal: "", + compareValue: "", + expectedResultPattern: "'flagWithMultipleElements' contains valid elements from ''", + testResult: true, + flagName: "flagWithMultipleElements", + }, + + { + label: "op=valid_elements, valid_elements flagVal empty", + op: "valid_elements", + flagVal: "", + compareValue: "a,b", + expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b'", + testResult: false, + flagName: "flagWithMultipleElements", + }, + + { + label: "op=valid_elements, valid_elements compareValue empty", + op: "valid_elements", + flagVal: "a,b", + compareValue: "", + expectedResultPattern: "'flagWithMultipleElements' contains valid elements from ''", + testResult: false, + flagName: "flagWithMultipleElements", + }, + { + label: "op=valid_elements, valid_elements two list equals", + op: "valid_elements", + flagVal: "a,b,c", + compareValue: "a,b,c", + expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b,c'", + testResult: true, + flagName: "flagWithMultipleElements", + }, + { + label: "op=valid_elements, valid_elements partial flagVal valid", + op: "valid_elements", + flagVal: "a,c", + compareValue: "a,b,c", + expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b,c'", + testResult: true, + flagName: "flagWithMultipleElements", + }, + { + label: "op=valid_elements, valid_elements partial compareValue valid", + op: "valid_elements", + flagVal: "a,b,c", + compareValue: "a,c", + expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,c'", + testResult: false, + flagName: "flagWithMultipleElements", + }, + + // Test Op "bitmask" + { + label: "op=bitmask, 644 AND 640", + op: "bitmask", + flagVal: "640", + compareValue: "644", + expectedResultPattern: "etc/fileExamplePermission640 has permissions 640, expected 644 or more 
restrictive", + testResult: true, + flagName: "etc/fileExamplePermission640", + }, + { + label: "op=bitmask, 644 AND 777", + op: "bitmask", + flagVal: "777", + compareValue: "644", + expectedResultPattern: "etc/fileExamplePermission777 has permissions 777, expected 644 or more restrictive", + testResult: false, + flagName: "etc/fileExamplePermission777", + }, + { + label: "op=bitmask, 644 AND 444", + op: "bitmask", + flagVal: "444", + compareValue: "644", + expectedResultPattern: "etc/fileExamplePermission444 has permissions 444, expected 644 or more restrictive", + testResult: true, + flagName: "etc/fileExamplePermission444", + }, + { + label: "op=bitmask, 644 AND 211", + op: "bitmask", + flagVal: "211", + compareValue: "644", + expectedResultPattern: "etc/fileExamplePermission211 has permissions 211, expected 644 or more restrictive", + testResult: false, + flagName: "etc/fileExamplePermission211", + }, + { + label: "op=bitmask, Harry AND 211", + op: "bitmask", + flagVal: "Harry", + compareValue: "644", + expectedResultPattern: "Not numeric value - flag: Harry", + testResult: false, + flagName: "etc/fileExample", + }, + { + label: "op=bitmask, 644 AND Potter", + op: "bitmask", + flagVal: "211", + compareValue: "Potter", + expectedResultPattern: "Not numeric value - flag: Potter", + testResult: false, + flagName: "etc/fileExample", + }, + } + + for _, c := range cases { + t.Run(c.label, func(t *testing.T) { + expectedResultPattern, testResult := compareOp(c.op, c.flagVal, c.compareValue, c.flagName) + if expectedResultPattern != c.expectedResultPattern { + t.Errorf("'expectedResultPattern' did not match - op: %q expected:%q got:%q", c.op, c.expectedResultPattern, expectedResultPattern) + } + + if testResult != c.testResult { + t.Errorf("'testResult' did not match - lop: %q expected:%t got:%t", c.op, c.testResult, testResult) + } + }) + } +} + +func TestToNumeric(t *testing.T) { + cases := []struct { + firstValue string + secondValue string + expectedToFail bool + }{ + { + firstValue: "a", + secondValue: "b", + expectedToFail: true, + }, + { + firstValue: "5", + secondValue: "b", + expectedToFail: true, + }, + { + firstValue: "5", + secondValue: "6", + expectedToFail: false, + }, + } + + for id, c := range cases { + t.Run(fmt.Sprintf("%d", id), func(t *testing.T) { + f, s, err := toNumeric(c.firstValue, c.secondValue) + if c.expectedToFail && err == nil { + t.Errorf("Expected error while converting %s and %s", c.firstValue, c.secondValue) + } + + if !c.expectedToFail && (f != 5 || s != 6) { + t.Errorf("Expected to return %d,%d - got %d,%d", 5, 6, f, s) + } + }) + } +} + +func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { + type Resources struct { + Resources []string `json:"resources"` + Providers []map[string]interface{} `json:"providers"` + } + + type EncryptionConfig struct { + Kind string `json:"kind"` + ApiVersion string `json:"apiVersion"` + Resources []Resources `json:"resources"` + } + + type Key struct { + Secret string `json:"secret"` + Name string `json:"name"` + } + + type Aescbc struct { + Keys []Key `json:"keys"` + } + + type SecretBox struct { + Keys []Key `json:"keys"` + } + + type Aesgcm struct { + Keys []Key `json:"keys"` + } + + // identity disable encryption when set as the first parameter + type Identity struct{} + + cases := []struct { + name string + jsonPath string + jsonInterface EncryptionConfig + expectedResult string + expectedToFail bool + }{ + { + "JSONPath parse works, results match", + "{.resources[*].providers[*].aescbc.keys[*].secret}", + 
EncryptionConfig{ + Kind: "EncryptionConfig", + ApiVersion: "v1", + Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, + "secret1", + false, + }, + { + "JSONPath parse works, results match", + "{.resources[*].providers[*].aescbc.keys[*].name}", + EncryptionConfig{ + Kind: "EncryptionConfig", + ApiVersion: "v1", + Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, + "name1", + false, + }, + { + "JSONPath parse works, results don't match", + "{.resources[*].providers[*].aescbc.keys[*].secret}", + EncryptionConfig{ + Kind: "EncryptionConfig", + ApiVersion: "v1", + Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ + {"aesgcm": Aesgcm{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, + "secret1", + true, + }, + { + "JSONPath parse works, results match", + "{.resources[*].providers[*].aesgcm.keys[*].secret}", + EncryptionConfig{ + Kind: "EncryptionConfig", + ApiVersion: "v1", + Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ + {"aesgcm": Aesgcm{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, + "secret1", + false, + }, + { + "JSONPath parse works, results match", + "{.resources[*].providers[*].secretbox.keys[*].secret}", + EncryptionConfig{ + Kind: "EncryptionConfig", + ApiVersion: "v1", + Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ + {"secretbox": SecretBox{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, + "secret1", + false, + }, + { + "JSONPath parse works, results match", + "{.resources[*].providers[*].aescbc.keys[*].secret}", + EncryptionConfig{ + Kind: "EncryptionConfig", + ApiVersion: "v1", + Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}, {Secret: "secret2", Name: "name2"}}}}, + }}}, + }, + "secret1 secret2", + false, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + result, err := executeJSONPath(c.jsonPath, c.jsonInterface) + if err != nil && !c.expectedToFail { + t.Fatalf("jsonPath:%q, expectedResult:%q got:%v", c.jsonPath, c.expectedResult, err) + } + if c.expectedResult != result && !c.expectedToFail { + t.Errorf("jsonPath:%q, expectedResult:%q got:%q", c.jsonPath, c.expectedResult, result) + } + }) + } +} diff --git a/cmd/kvisor/kubebench/common.go b/cmd/kvisor/kubebench/common.go new file mode 100644 index 00000000..a590505c --- /dev/null +++ b/cmd/kvisor/kubebench/common.go @@ -0,0 +1,526 @@ +// Copyright © 2017 Aqua Security Software Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
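+//
+// Adapted from kube-bench (https://github.com/aquasecurity/kube-bench), per the
+// copyright above; it drives CIS benchmark check runs for the kvisor sub-command.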
+ +package kubebench + +import ( + "bufio" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + check2 "github.com/castai/kvisor/cmd/kvisor/kubebench/check" + "github.com/golang/glog" + "github.com/spf13/viper" +) + +// NewRunFilter constructs a Predicate based on FilterOpts which determines whether tested Checks should be run or not. +func NewRunFilter(opts FilterOpts) (check2.Predicate, error) { + if opts.CheckList != "" && opts.GroupList != "" { + return nil, fmt.Errorf("group option and check option can't be used together") + } + + var groupIDs map[string]bool + if opts.GroupList != "" { + groupIDs = cleanIDs(opts.GroupList) + } + + var checkIDs map[string]bool + if opts.CheckList != "" { + checkIDs = cleanIDs(opts.CheckList) + } + + return func(g *check2.Group, c *check2.Check) bool { + test := true + if len(groupIDs) > 0 { + _, ok := groupIDs[g.ID] + test = test && ok + } + + if len(checkIDs) > 0 { + _, ok := checkIDs[c.ID] + test = test && ok + } + + test = test && (opts.Scored && c.Scored || opts.Unscored && !c.Scored) + + return test + }, nil +} + +func runChecks(nodetype check2.NodeType, testYamlFile, detectedVersion string) { + // Verify config file was loaded into Viper during Cobra sub-command initialization. + if configFileError != nil { + colorPrint(check2.FAIL, fmt.Sprintf("Failed to read config file: %v\n", configFileError)) + os.Exit(1) + } + + in, err := ioutil.ReadFile(testYamlFile) + if err != nil { + exitWithError(fmt.Errorf("error opening %s test file: %v", testYamlFile, err)) + } + + glog.V(1).Info(fmt.Sprintf("Using test file: %s\n", testYamlFile)) + + // Get the viper config for this section of tests + typeConf := viper.Sub(string(nodetype)) + if typeConf == nil { + colorPrint(check2.FAIL, fmt.Sprintf("No config settings for %s\n", string(nodetype))) + os.Exit(1) + } + + // Get the set of executables we need for this section of the tests + binmap, err := getBinaries(typeConf, nodetype) + // Checks that the executables we need for the section are running. + if err != nil { + glog.V(1).Info(fmt.Sprintf("failed to get a set of executables needed for tests: %v", err)) + } + + confmap := getFiles(typeConf, "config") + svcmap := getFiles(typeConf, "service") + kubeconfmap := getFiles(typeConf, "kubeconfig") + cafilemap := getFiles(typeConf, "ca") + datadirmap := getFiles(typeConf, "datadir") + + // Variable substitutions. Replace all occurrences of variables in controls files. 
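+    // For example, given a (hypothetical) binmap of {"kubelet": "hyperkube kubelet"},
+    // every "$kubeletbin" token in the controls YAML becomes "hyperkube kubelet";
+    // the conf, svc, kubeconfig, cafile and datadir maps are substituted the same way.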
+    s := string(in)
+    s, binSubs := makeSubstitutions(s, "bin", binmap)
+    s, _ = makeSubstitutions(s, "conf", confmap)
+    s, _ = makeSubstitutions(s, "svc", svcmap)
+    s, _ = makeSubstitutions(s, "kubeconfig", kubeconfmap)
+    s, _ = makeSubstitutions(s, "cafile", cafilemap)
+    s, _ = makeSubstitutions(s, "datadir", datadirmap)
+
+    controls, err := check2.NewControls(nodetype, []byte(s), detectedVersion)
+    if err != nil {
+        exitWithError(fmt.Errorf("error setting up %s controls: %v", nodetype, err))
+    }
+
+    runner := check2.NewRunner()
+    filter, err := NewRunFilter(filterOpts)
+    if err != nil {
+        exitWithError(fmt.Errorf("error setting up run filter: %v", err))
+    }
+
+    generateDefaultEnvAudit(controls, binSubs)
+
+    controls.RunChecks(runner, filter, parseSkipIds(skipIds))
+    controlsCollection = append(controlsCollection, controls)
+}
+
+func generateDefaultEnvAudit(controls *check2.Controls, binSubs []string) {
+    for _, group := range controls.Groups {
+        for _, checkItem := range group.Checks {
+            if checkItem.Tests != nil && !checkItem.DisableEnvTesting {
+                for _, test := range checkItem.Tests.TestItems {
+                    if test.Env != "" && checkItem.AuditEnv == "" {
+                        binPath := ""
+
+                        if len(binSubs) == 1 {
+                            binPath = binSubs[0]
+                        } else {
+                            glog.V(1).Infof("AuditEnv not set explicitly for check (%s) and the bin path cannot be determined", checkItem.ID)
+                        }
+
+                        checkItem.AuditEnv = fmt.Sprintf("cat \"/proc/$(/bin/ps -C %s -o pid= | tr -d ' ')/environ\" | tr '\\0' '\\n'", binPath)
+                    }
+                }
+            }
+        }
+    }
+}
+
+func parseSkipIds(skipIds string) map[string]bool {
+    skipIdMap := make(map[string]bool)
+    if skipIds != "" {
+        for _, id := range strings.Split(skipIds, ",") {
+            skipIdMap[strings.TrimSpace(id)] = true
+        }
+    }
+    return skipIdMap
+}
+
+// colorPrint outputs the state in a specific colour, along with a message string
+func colorPrint(state check2.State, s string) {
+    colors[state].Printf("[%s] ", state)
+    fmt.Printf("%s", s)
+}
+
+// prettyPrint outputs the results to stdout in human-readable format
+func prettyPrint(r *check2.Controls, summary check2.Summary) {
+    // Print check results.
+    if !noResults {
+        colorPrint(check2.INFO, fmt.Sprintf("%s %s\n", r.ID, r.Text))
+        for _, g := range r.Groups {
+            colorPrint(check2.INFO, fmt.Sprintf("%s %s\n", g.ID, g.Text))
+            for _, c := range g.Checks {
+                colorPrint(c.State, fmt.Sprintf("%s %s\n", c.ID, c.Text))
+
+                if includeTestOutput && c.State == check2.FAIL && len(c.ActualValue) > 0 {
+                    printRawOutput(c.ActualValue)
+                }
+            }
+        }
+
+        fmt.Println()
+    }
+
+    // Print remediations.
+    if !noRemediations {
+        if summary.Fail > 0 || summary.Warn > 0 {
+            colors[check2.WARN].Printf("== Remediations %s ==\n", r.Type)
+            for _, g := range r.Groups {
+                for _, c := range g.Checks {
+                    if c.State == check2.FAIL {
+                        fmt.Printf("%s %s\n", c.ID, c.Remediation)
+                    }
+                    if c.State == check2.WARN {
+                        // Print the reason if the audit command failed to run.
+                        if c.Reason != "" && c.Type != "manual" {
+                            fmt.Printf("%s audit test did not run: %s\n", c.ID, c.Reason)
+                        } else {
+                            fmt.Printf("%s %s\n", c.ID, c.Remediation)
+                        }
+                    }
+                }
+            }
+            fmt.Println()
+        }
+    }
+
+    // Print summary, setting the output color to the highest severity.
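+    // Severity precedence is FAIL over WARN over PASS: any failing check decides
+    // the colour of the summary banner (see printSummary below).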
+ if !noSummary { + printSummary(summary, string(r.Type)) + } +} + +func printSummary(summary check2.Summary, sectionName string) { + var res check2.State + if summary.Fail > 0 { + res = check2.FAIL + } else if summary.Warn > 0 { + res = check2.WARN + } else { + res = check2.PASS + } + + colors[res].Printf("== Summary %s ==\n", sectionName) + fmt.Printf("%d checks PASS\n%d checks FAIL\n%d checks WARN\n%d checks INFO\n\n", + summary.Pass, summary.Fail, summary.Warn, summary.Info, + ) +} + +// loadConfig finds the correct config dir based on the kubernetes version, +// merges any specific config.yaml file found with the main config +// and returns the benchmark file to use. +func loadConfig(nodetype check2.NodeType, benchmarkVersion string) string { + var file string + var err error + + switch nodetype { + case check2.MASTER: + file = masterFile + case check2.NODE: + file = nodeFile + case check2.CONTROLPLANE: + file = controlplaneFile + case check2.ETCD: + file = etcdFile + case check2.POLICIES: + file = policiesFile + case check2.MANAGEDSERVICES: + file = managedservicesFile + } + + path, err := getConfigFilePath(benchmarkVersion, file) + if err != nil { + exitWithError(fmt.Errorf("can't find %s controls file in %s: %v", nodetype, cfgDir, err)) + } + + // Merge version-specific config if any. + mergeConfig(path) + + return filepath.Join(path, file) +} + +func mergeConfig(path string) error { + viper.SetConfigFile(path + "/config.yaml") + err := viper.MergeInConfig() + if err != nil { + if os.IsNotExist(err) { + glog.V(2).Info(fmt.Sprintf("No version-specific config.yaml file in %s", path)) + } else { + return fmt.Errorf("couldn't read config file %s: %v", path+"/config.yaml", err) + } + } + + glog.V(1).Info(fmt.Sprintf("Using config file: %s\n", viper.ConfigFileUsed())) + + return nil +} + +func mapToBenchmarkVersion(kubeToBenchmarkMap map[string]string, kv string) (string, error) { + kvOriginal := kv + cisVersion, found := kubeToBenchmarkMap[kv] + glog.V(2).Info(fmt.Sprintf("mapToBenchmarkVersion for k8sVersion: %q cisVersion: %q found: %t\n", kv, cisVersion, found)) + for !found && (kv != defaultKubeVersion && !isEmpty(kv)) { + kv = decrementVersion(kv) + cisVersion, found = kubeToBenchmarkMap[kv] + glog.V(2).Info(fmt.Sprintf("mapToBenchmarkVersion for k8sVersion: %q cisVersion: %q found: %t\n", kv, cisVersion, found)) + } + + if !found { + glog.V(1).Info(fmt.Sprintf("mapToBenchmarkVersion unable to find a match for: %q", kvOriginal)) + glog.V(3).Info(fmt.Sprintf("mapToBenchmarkVersion kubeToBenchmarkMap: %#v", kubeToBenchmarkMap)) + return "", fmt.Errorf("unable to find a matching Benchmark Version match for kubernetes version: %s", kvOriginal) + } + + return cisVersion, nil +} + +func loadVersionMapping(v *viper.Viper) (map[string]string, error) { + kubeToBenchmarkMap := v.GetStringMapString("version_mapping") + if kubeToBenchmarkMap == nil || (len(kubeToBenchmarkMap) == 0) { + return nil, fmt.Errorf("config file is missing 'version_mapping' section") + } + + return kubeToBenchmarkMap, nil +} + +func loadTargetMapping(v *viper.Viper) (map[string][]string, error) { + benchmarkVersionToTargetsMap := v.GetStringMapStringSlice("target_mapping") + if len(benchmarkVersionToTargetsMap) == 0 { + return nil, fmt.Errorf("config file is missing 'target_mapping' section") + } + + return benchmarkVersionToTargetsMap, nil +} + +func getBenchmarkVersion(kubeVersion, benchmarkVersion string, platform Platform, v *viper.Viper) (bv string, err error) { + detecetedKubeVersion = "none" + if 
!isEmpty(kubeVersion) && !isEmpty(benchmarkVersion) {
+        return "", fmt.Errorf("It is an error to specify both --version and --benchmark flags")
+    }
+    if isEmpty(benchmarkVersion) && isEmpty(kubeVersion) && !isEmpty(platform.Name) {
+        benchmarkVersion = getPlatformBenchmarkVersion(platform)
+        if !isEmpty(benchmarkVersion) {
+            detecetedKubeVersion = benchmarkVersion
+        }
+    }
+
+    if isEmpty(benchmarkVersion) {
+        if isEmpty(kubeVersion) {
+            kv, err := getKubeVersion()
+            if err != nil {
+                return "", fmt.Errorf("Version check failed: %s\nAlternatively, you can specify the version with --version", err)
+            }
+            kubeVersion = kv.BaseVersion()
+            detecetedKubeVersion = kubeVersion
+        }
+
+        kubeToBenchmarkMap, err := loadVersionMapping(v)
+        if err != nil {
+            return "", err
+        }
+
+        benchmarkVersion, err = mapToBenchmarkVersion(kubeToBenchmarkMap, kubeVersion)
+        if err != nil {
+            return "", err
+        }
+
+        glog.V(2).Info(fmt.Sprintf("Mapped Kubernetes version: %s to Benchmark version: %s", kubeVersion, benchmarkVersion))
+    }
+
+    glog.V(1).Info(fmt.Sprintf("Kubernetes version: %q to Benchmark version: %q", kubeVersion, benchmarkVersion))
+    return benchmarkVersion, nil
+}
+
+// isMaster verifies whether master components are running on the node.
+func isMaster() bool {
+    return isThisNodeRunning(check2.MASTER)
+}
+
+// isEtcd verifies whether etcd components are running on the node.
+func isEtcd() bool {
+    return isThisNodeRunning(check2.ETCD)
+}
+
+func isThisNodeRunning(nodeType check2.NodeType) bool {
+    glog.V(3).Infof("Checking if the current node is running %s components", nodeType)
+    nodeTypeConf := viper.Sub(string(nodeType))
+    if nodeTypeConf == nil {
+        glog.V(2).Infof("No config for %s components found", nodeType)
+        return false
+    }
+
+    components, err := getBinariesFunc(nodeTypeConf, nodeType)
+    if err != nil {
+        glog.V(2).Infof("Failed to find %s binaries: %v", nodeType, err)
+        return false
+    }
+    if len(components) == 0 {
+        glog.V(2).Infof("No %s binaries specified", nodeType)
+        return false
+    }
+
+    glog.V(2).Infof("Node is running %s components", nodeType)
+    return true
+}
+
+func exitCodeSelection(controlsCollection []*check2.Controls) int {
+    for _, control := range controlsCollection {
+        if control.Fail > 0 {
+            return exitCode
+        }
+    }
+
+    return 0
+}
+
+func writeOutput(controlsCollection []*check2.Controls) {
+    sort.Slice(controlsCollection, func(i, j int) bool {
+        iid, _ := strconv.Atoi(controlsCollection[i].ID)
+        jid, _ := strconv.Atoi(controlsCollection[j].ID)
+        return iid < jid
+    })
+    if junitFmt {
+        writeJunitOutput(controlsCollection)
+        return
+    }
+    if jsonFmt {
+        writeJSONOutput(controlsCollection)
+        return
+    }
+    writeStdoutOutput(controlsCollection)
+}
+
+func writeJSONOutput(controlsCollection []*check2.Controls) {
+    var out []byte
+    var err error
+    if !noTotals {
+        var totals check2.OverallControls
+        totals.Controls = controlsCollection
+        totals.Totals = getSummaryTotals(controlsCollection)
+        out, err = json.Marshal(totals)
+    } else {
+        out, err = json.Marshal(controlsCollection)
+    }
+    if err != nil {
+        exitWithError(fmt.Errorf("failed to output in JSON format: %v", err))
+    }
+    printOutput(string(out), outputFile)
+}
+
+func writeJunitOutput(controlsCollection []*check2.Controls) {
+    // QuickFix for issue https://github.com/aquasecurity/kube-bench/issues/883
+    // Consider deprecating this or switching to a JUnit template.
+    prefix := "<testsuites>\n"
+    suffix := "</testsuites>\n"
+    var outputAllControls []byte
+    for _, controls := range controlsCollection {
+        tempOut, err := controls.JUnit()
+        if err != nil {
+            exitWithError(fmt.Errorf("failed to output in JUnit format: %v", err))
+        }
+        outputAllControls = append(outputAllControls, tempOut...)
+    }
+    printOutput(prefix+string(outputAllControls)+suffix, outputFile)
+}
+
+func writeStdoutOutput(controlsCollection []*check2.Controls) {
+    for _, controls := range controlsCollection {
+        summary := controls.Summary
+        prettyPrint(controls, summary)
+    }
+    if !noTotals {
+        printSummary(getSummaryTotals(controlsCollection), "total")
+    }
+}
+
+func getSummaryTotals(controlsCollection []*check2.Controls) check2.Summary {
+    var totalSummary check2.Summary
+    for _, controls := range controlsCollection {
+        summary := controls.Summary
+        totalSummary.Fail += summary.Fail
+        totalSummary.Warn += summary.Warn
+        totalSummary.Pass += summary.Pass
+        totalSummary.Info += summary.Info
+    }
+    return totalSummary
+}
+
+func printRawOutput(output string) {
+    for _, row := range strings.Split(output, "\n") {
+        fmt.Printf("\t %s\n", row)
+    }
+}
+
+func writeOutputToFile(output string, outputFile string) error {
+    file, err := os.Create(outputFile)
+    if err != nil {
+        return err
+    }
+    defer file.Close()
+
+    w := bufio.NewWriter(file)
+    fmt.Fprintln(w, output)
+    return w.Flush()
+}
+
+func printOutput(output string, outputFile string) {
+    if outputFile == "" {
+        fmt.Println(output)
+    } else {
+        err := writeOutputToFile(output, outputFile)
+        if err != nil {
+            exitWithError(fmt.Errorf("Failed to write to output file %s: %v", outputFile, err))
+        }
+    }
+}
+
+// validTargets helps determine if the targets
+// are legitimate for the benchmarkVersion.
+func validTargets(benchmarkVersion string, targets []string, v *viper.Viper) (bool, error) {
+    benchmarkVersionToTargetsMap, err := loadTargetMapping(v)
+    if err != nil {
+        return false, err
+    }
+    providedTargets, found := benchmarkVersionToTargetsMap[benchmarkVersion]
+    if !found {
+        return false, fmt.Errorf("No targets configured for %s", benchmarkVersion)
+    }
+
+    for _, pt := range targets {
+        f := false
+        for _, t := range providedTargets {
+            if pt == strings.ToLower(t) {
+                f = true
+                break
+            }
+        }
+
+        if !f {
+            return false, nil
+        }
+    }
+
+    return true, nil
+}
diff --git a/cmd/kvisor/kubebench/common_test.go b/cmd/kvisor/kubebench/common_test.go
new file mode 100644
index 00000000..e692be44
--- /dev/null
+++ b/cmd/kvisor/kubebench/common_test.go
@@ -0,0 +1,861 @@
+// Copyright © 2017-2019 Aqua Security Software Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
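+//
+// These tests cover run filtering, benchmark version mapping, and the JSON,
+// JUnit and stdout output paths implemented in common.go.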
+ +package kubebench + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + "time" + + check2 "github.com/castai/kvisor/cmd/kvisor/kubebench/check" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" +) + +type JsonOutputFormat struct { + Controls []*check2.Controls `json:"Controls"` + TotalSummary map[string]int `json:"Totals"` +} + +type JsonOutputFormatNoTotals struct { + Controls []*check2.Controls `json:"Controls"` +} + +func TestParseSkipIds(t *testing.T) { + skipMap := parseSkipIds("4.12,4.13,5") + _, fourTwelveExists := skipMap["4.12"] + _, fourThirteenExists := skipMap["4.13"] + _, fiveExists := skipMap["5"] + _, other := skipMap["G1"] + assert.True(t, fourThirteenExists) + assert.True(t, fourTwelveExists) + assert.True(t, fiveExists) + assert.False(t, other) +} + +func TestNewRunFilter(t *testing.T) { + type TestCase struct { + Name string + FilterOpts FilterOpts + Group *check2.Group + Check *check2.Check + + Expected bool + } + + testCases := []TestCase{ + { + Name: "Should return true when scored flag is enabled and check is scored", + FilterOpts: FilterOpts{Scored: true, Unscored: false}, + Group: &check2.Group{}, + Check: &check2.Check{Scored: true}, + Expected: true, + }, + { + Name: "Should return false when scored flag is enabled and check is not scored", + FilterOpts: FilterOpts{Scored: true, Unscored: false}, + Group: &check2.Group{}, + Check: &check2.Check{Scored: false}, + Expected: false, + }, + + { + Name: "Should return true when unscored flag is enabled and check is not scored", + FilterOpts: FilterOpts{Scored: false, Unscored: true}, + Group: &check2.Group{}, + Check: &check2.Check{Scored: false}, + Expected: true, + }, + { + Name: "Should return false when unscored flag is enabled and check is scored", + FilterOpts: FilterOpts{Scored: false, Unscored: true}, + Group: &check2.Group{}, + Check: &check2.Check{Scored: true}, + Expected: false, + }, + + { + Name: "Should return true when group flag contains group's ID", + FilterOpts: FilterOpts{Scored: true, Unscored: true, GroupList: "G1,G2,G3"}, + Group: &check2.Group{ID: "G2"}, + Check: &check2.Check{}, + Expected: true, + }, + { + Name: "Should return false when group flag doesn't contain group's ID", + FilterOpts: FilterOpts{GroupList: "G1,G3"}, + Group: &check2.Group{ID: "G2"}, + Check: &check2.Check{}, + Expected: false, + }, + + { + Name: "Should return true when check flag contains check's ID", + FilterOpts: FilterOpts{Scored: true, Unscored: true, CheckList: "C1,C2,C3"}, + Group: &check2.Group{}, + Check: &check2.Check{ID: "C2"}, + Expected: true, + }, + { + Name: "Should return false when check flag doesn't contain check's ID", + FilterOpts: FilterOpts{CheckList: "C1,C3"}, + Group: &check2.Group{}, + Check: &check2.Check{ID: "C2"}, + Expected: false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + filter, _ := NewRunFilter(testCase.FilterOpts) + assert.Equal(t, testCase.Expected, filter(testCase.Group, testCase.Check)) + }) + } + + t.Run("Should return error when both group and check flags are used", func(t *testing.T) { + // given + opts := FilterOpts{GroupList: "G1", CheckList: "C1"} + // when + _, err := NewRunFilter(opts) + // then + assert.EqualError(t, err, "group option and check option can't be used together") + }) +} + +func TestIsMaster(t *testing.T) { + testCases := []struct { + name string + cfgFile string + getBinariesFunc func(*viper.Viper, check2.NodeType) (map[string]string, 
error) + isMaster bool + }{ + { + name: "valid config, is master and all components are running", + cfgFile: "./kubebench-rules/config.yaml", + getBinariesFunc: func(viper *viper.Viper, nt check2.NodeType) (strings map[string]string, i error) { + return map[string]string{"apiserver": "kube-apiserver"}, nil + }, + isMaster: true, + }, + { + name: "valid config, is master and but not all components are running", + cfgFile: "./kubebench-rules/config.yaml", + getBinariesFunc: func(viper *viper.Viper, nt check2.NodeType) (strings map[string]string, i error) { + return map[string]string{}, nil + }, + isMaster: false, + }, + { + name: "valid config, is master, not all components are running and fails to find all binaries", + cfgFile: "./kubebench-rules/config.yaml", + getBinariesFunc: func(viper *viper.Viper, nt check2.NodeType) (strings map[string]string, i error) { + return map[string]string{}, errors.New("failed to find binaries") + }, + isMaster: false, + }, + } + cfgDirOld := cfgDir + cfgDir = "../cfg" + defer func() { + cfgDir = cfgDirOld + }() + + execCode := `#!/bin/sh + echo "Server Version: v1.13.10" + ` + restore, err := fakeExecutableInPath("kubectl", execCode) + if err != nil { + t.Fatal("Failed when calling fakeExecutableInPath ", err) + } + defer restore() + + for _, tc := range testCases { + func() { + cfgFile = tc.cfgFile + initConfig() + + oldGetBinariesFunc := getBinariesFunc + getBinariesFunc = tc.getBinariesFunc + defer func() { + getBinariesFunc = oldGetBinariesFunc + cfgFile = "" + }() + + assert.Equal(t, tc.isMaster, isMaster(), tc.name) + }() + } +} + +func TestMapToCISVersion(t *testing.T) { + viperWithData, err := loadConfigForTest() + if err != nil { + t.Fatalf("Unable to load config file %v", err) + } + kubeToBenchmarkMap, err := loadVersionMapping(viperWithData) + if err != nil { + t.Fatalf("Unable to load config file %v", err) + } + + cases := []struct { + kubeVersion string + succeed bool + exp string + expErr string + }{ + {kubeVersion: "1.9", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.9"}, + {kubeVersion: "1.11", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.11"}, + {kubeVersion: "1.12", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.12"}, + {kubeVersion: "1.13", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.13"}, + {kubeVersion: "1.14", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: 1.14"}, + {kubeVersion: "1.15", succeed: true, exp: "cis-1.5"}, + {kubeVersion: "1.16", succeed: true, exp: "cis-1.6"}, + {kubeVersion: "1.17", succeed: true, exp: "cis-1.6"}, + {kubeVersion: "1.18", succeed: true, exp: "cis-1.6"}, + {kubeVersion: "1.19", succeed: true, exp: "cis-1.20"}, + {kubeVersion: "1.20", succeed: true, exp: "cis-1.20"}, + {kubeVersion: "1.21", succeed: true, exp: "cis-1.20"}, + {kubeVersion: "1.22", succeed: true, exp: "cis-1.23"}, + {kubeVersion: "1.23", succeed: true, exp: "cis-1.23"}, + {kubeVersion: "1.24", succeed: true, exp: "cis-1.24"}, + {kubeVersion: "1.25", succeed: true, exp: "cis-1.7"}, + {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, + {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, + {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, + {kubeVersion: "unknown", succeed: false, exp: "", expErr: "unable to 
find a matching Benchmark Version match for kubernetes version: unknown"}, + } + for _, c := range cases { + rv, err := mapToBenchmarkVersion(kubeToBenchmarkMap, c.kubeVersion) + if c.succeed { + if err != nil { + t.Errorf("[%q]-Unexpected error: %v", c.kubeVersion, err) + } + + if len(rv) == 0 { + t.Errorf("[%q]-missing return value", c.kubeVersion) + } + + if c.exp != rv { + t.Errorf("[%q]- expected %q but Got %q", c.kubeVersion, c.exp, rv) + } + + } else { + if c.exp != rv { + t.Errorf("[%q]-mapToBenchmarkVersion kubeversion: %q Got %q expected %s", c.kubeVersion, c.kubeVersion, rv, c.exp) + } + + if c.expErr != err.Error() { + t.Errorf("[%q]-mapToBenchmarkVersion expected Error: %q instead Got %q", c.kubeVersion, c.expErr, err.Error()) + } + } + } +} + +func TestLoadVersionMapping(t *testing.T) { + setDefault := func(v *viper.Viper, key string, value interface{}) *viper.Viper { + v.SetDefault(key, value) + return v + } + + viperWithData, err := loadConfigForTest() + if err != nil { + t.Fatalf("Unable to load config file %v", err) + } + + cases := []struct { + n string + v *viper.Viper + succeed bool + }{ + {n: "empty", v: viper.New(), succeed: false}, + { + n: "novals", + v: setDefault(viper.New(), "version_mapping", "novals"), + succeed: false, + }, + { + n: "good", + v: viperWithData, + succeed: true, + }, + } + for _, c := range cases { + rv, err := loadVersionMapping(c.v) + if c.succeed { + if err != nil { + t.Errorf("[%q]-Unexpected error: %v", c.n, err) + } + + if len(rv) == 0 { + t.Errorf("[%q]-missing mapping value", c.n) + } + } else { + if err == nil { + t.Errorf("[%q]-Expected error but got none", c.n) + } + } + } +} + +func TestGetBenchmarkVersion(t *testing.T) { + viperWithData, err := loadConfigForTest() + if err != nil { + t.Fatalf("Unable to load config file %v", err) + } + + type getBenchmarkVersionFnToTest func(kubeVersion, benchmarkVersion string, platform Platform, v *viper.Viper) (string, error) + + withFakeKubectl := func(kubeVersion, benchmarkVersion string, platform Platform, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) { + execCode := `#!/bin/sh + echo '{"serverVersion": {"major": "1", "minor": "18", "gitVersion": "v1.18.10"}}' + ` + restore, err := fakeExecutableInPath("kubectl", execCode) + if err != nil { + t.Fatal("Failed when calling fakeExecutableInPath ", err) + } + defer restore() + + return fn(kubeVersion, benchmarkVersion, platform, v) + } + + withNoPath := func(kubeVersion, benchmarkVersion string, platform Platform, v *viper.Viper, fn getBenchmarkVersionFnToTest) (string, error) { + restore, err := prunePath() + if err != nil { + t.Fatal("Failed when calling prunePath ", err) + } + defer restore() + + return fn(kubeVersion, benchmarkVersion, platform, v) + } + + type getBenchmarkVersionFn func(string, string, Platform, *viper.Viper, getBenchmarkVersionFnToTest) (string, error) + cases := []struct { + n string + kubeVersion string + benchmarkVersion string + platform Platform + v *viper.Viper + callFn getBenchmarkVersionFn + exp string + succeed bool + }{ + {n: "both versions", kubeVersion: "1.11", benchmarkVersion: "cis-1.3", platform: Platform{}, exp: "cis-1.3", callFn: withNoPath, v: viper.New(), succeed: false}, + {n: "no version-missing-kubectl", kubeVersion: "", benchmarkVersion: "", platform: Platform{}, v: viperWithData, exp: "cis-1.6", callFn: withNoPath, succeed: true}, + {n: "no version-fakeKubectl", kubeVersion: "", benchmarkVersion: "", platform: Platform{}, v: viperWithData, exp: "cis-1.6", callFn: withFakeKubectl, 
succeed: true}, + {n: "kubeVersion", kubeVersion: "1.15", benchmarkVersion: "", platform: Platform{}, v: viperWithData, exp: "cis-1.5", callFn: withNoPath, succeed: true}, + {n: "ocpVersion310", kubeVersion: "ocp-3.10", benchmarkVersion: "", platform: Platform{}, v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true}, + {n: "ocpVersion311", kubeVersion: "ocp-3.11", benchmarkVersion: "", platform: Platform{}, v: viperWithData, exp: "rh-0.7", callFn: withNoPath, succeed: true}, + {n: "gke12", kubeVersion: "gke-1.2.0", benchmarkVersion: "", platform: Platform{}, v: viperWithData, exp: "gke-1.2.0", callFn: withNoPath, succeed: true}, + } + for _, c := range cases { + rv, err := c.callFn(c.kubeVersion, c.benchmarkVersion, c.platform, c.v, getBenchmarkVersion) + if c.succeed { + if err != nil { + t.Errorf("[%q]-Unexpected error: %v", c.n, err) + } + + if len(rv) == 0 { + t.Errorf("[%q]-missing return value", c.n) + } + + if c.exp != rv { + t.Errorf("[%q]- expected %q but Got %q", c.n, c.exp, rv) + } + } else { + if err == nil { + t.Errorf("[%q]-Expected error but got none", c.n) + } + } + } +} + +func TestValidTargets(t *testing.T) { + viperWithData, err := loadConfigForTest() + if err != nil { + t.Fatalf("Unable to load config file %v", err) + } + cases := []struct { + name string + benchmark string + targets []string + expected bool + }{ + { + name: "cis-1.5 no dummy", + benchmark: "cis-1.5", + targets: []string{"master", "node", "controlplane", "etcd", "dummy"}, + expected: false, + }, + { + name: "cis-1.5 valid", + benchmark: "cis-1.5", + targets: []string{"master", "node", "controlplane", "etcd", "policies"}, + expected: true, + }, + { + name: "cis-1.6 no Pikachu", + benchmark: "cis-1.6", + targets: []string{"master", "node", "controlplane", "etcd", "Pikachu"}, + expected: false, + }, + { + name: "cis-1.6 valid", + benchmark: "cis-1.6", + targets: []string{"master", "node", "controlplane", "etcd", "policies"}, + expected: true, + }, + { + name: "gke-1.2.0 valid", + benchmark: "gke-1.2.0", + targets: []string{"master", "node", "controlplane", "policies", "managedservices"}, + expected: true, + }, + { + name: "aks-1.0 valid", + benchmark: "aks-1.0", + targets: []string{"node", "policies", "controlplane", "managedservices"}, + expected: true, + }, + { + name: "eks-1.0.1 valid", + benchmark: "eks-1.0.1", + targets: []string{"node", "policies", "controlplane", "managedservices"}, + expected: true, + }, + { + name: "eks-1.1.0 valid", + benchmark: "eks-1.1.0", + targets: []string{"node", "policies", "controlplane", "managedservices"}, + expected: true, + }, + { + name: "eks-1.2.0 valid", + benchmark: "eks-1.2.0", + targets: []string{"node", "policies", "controlplane", "managedservices"}, + expected: true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ret, err := validTargets(c.benchmark, c.targets, viperWithData) + if err != nil { + t.Fatalf("Expected nil error, got: %v", err) + } + if ret != c.expected { + t.Fatalf("Expected %t, got %t", c.expected, ret) + } + }) + } +} + +func TestIsEtcd(t *testing.T) { + testCases := []struct { + name string + cfgFile string + getBinariesFunc func(*viper.Viper, check2.NodeType) (map[string]string, error) + isEtcd bool + }{ + { + name: "valid config, is etcd and all components are running", + cfgFile: "./kubebench-rules/config.yaml", + getBinariesFunc: func(viper *viper.Viper, nt check2.NodeType) (strings map[string]string, i error) { + return map[string]string{"etcd": "etcd"}, nil + }, + isEtcd: true, + }, + { + 
name: "valid config, is etcd and but not all components are running", + cfgFile: "./kubebench-rules/config.yaml", + getBinariesFunc: func(viper *viper.Viper, nt check2.NodeType) (strings map[string]string, i error) { + return map[string]string{}, nil + }, + isEtcd: false, + }, + { + name: "valid config, is etcd, not all components are running and fails to find all binaries", + cfgFile: "./kubebench-rules/config.yaml", + getBinariesFunc: func(viper *viper.Viper, nt check2.NodeType) (strings map[string]string, i error) { + return map[string]string{}, errors.New("failed to find binaries") + }, + isEtcd: false, + }, + } + cfgDirOld := cfgDir + cfgDir = "../cfg" + defer func() { + cfgDir = cfgDirOld + }() + + execCode := `#!/bin/sh + echo "Server Version: v1.15.03" + ` + restore, err := fakeExecutableInPath("kubectl", execCode) + if err != nil { + t.Fatal("Failed when calling fakeExecutableInPath ", err) + } + defer restore() + + for _, tc := range testCases { + func() { + cfgFile = tc.cfgFile + initConfig() + + oldGetBinariesFunc := getBinariesFunc + getBinariesFunc = tc.getBinariesFunc + defer func() { + getBinariesFunc = oldGetBinariesFunc + cfgFile = "" + }() + + assert.Equal(t, tc.isEtcd, isEtcd(), tc.name) + }() + } +} + +func TestWriteResultToJsonFile(t *testing.T) { + defer func() { + controlsCollection = []*check2.Controls{} + jsonFmt = false + outputFile = "" + }() + var err error + jsonFmt = true + outputFile = path.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().UnixNano())) + + controlsCollection, err = parseControlsJsonFile("./testdata/controlsCollection.json") + if err != nil { + t.Error(err) + } + writeOutput(controlsCollection) + + var expect JsonOutputFormat + var result JsonOutputFormat + result, err = parseResultJsonFile(outputFile) + if err != nil { + t.Error(err) + } + expect, err = parseResultJsonFile("./testdata/result.json") + if err != nil { + t.Error(err) + } + + assert.Equal(t, expect, result) +} + +func TestWriteResultNoTotalsToJsonFile(t *testing.T) { + defer func() { + controlsCollection = []*check2.Controls{} + jsonFmt = false + outputFile = "" + }() + var err error + jsonFmt = true + outputFile = path.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().UnixNano())) + + noTotals = true + + controlsCollection, err = parseControlsJsonFile("./testdata/controlsCollection.json") + if err != nil { + t.Error(err) + } + writeOutput(controlsCollection) + + var expect []*check2.Controls + var result []*check2.Controls + result, err = parseResultNoTotalsJsonFile(outputFile) + if err != nil { + t.Error(err) + } + expect, err = parseResultNoTotalsJsonFile("./testdata/result_no_totals.json") + if err != nil { + t.Error(err) + } + + assert.Equal(t, expect, result) +} + +func TestExitCodeSelection(t *testing.T) { + exitCode = 10 + controlsCollectionAllPassed, errPassed := parseControlsJsonFile("./testdata/passedControlsCollection.json") + if errPassed != nil { + t.Error(errPassed) + } + controlsCollectionWithFailures, errFailure := parseControlsJsonFile("./testdata/controlsCollection.json") + if errFailure != nil { + t.Error(errFailure) + } + + exitCodePassed := exitCodeSelection(controlsCollectionAllPassed) + assert.Equal(t, 0, exitCodePassed) + + exitCodeFailure := exitCodeSelection(controlsCollectionWithFailures) + assert.Equal(t, 10, exitCodeFailure) +} + +func TestGenerationDefaultEnvAudit(t *testing.T) { + input := []byte(` +--- +type: "master" +groups: +- id: G1 + checks: + - id: G1/C1 +- id: G2 + checks: + - id: G2/C1 + text: "Verify that the SomeSampleFlag argument is set 
to true" + audit: "grep -B1 SomeSampleFlag=true /this/is/a/file/path" + tests: + test_items: + - flag: "SomeSampleFlag=true" + env: "SOME_SAMPLE_FLAG" + compare: + op: has + value: "true" + set: true + remediation: | + Edit the config file /this/is/a/file/path and set SomeSampleFlag to true. + scored: true +`) + controls, err := check2.NewControls(check2.MASTER, input, "") + assert.NoError(t, err) + + binSubs := []string{"TestBinPath"} + generateDefaultEnvAudit(controls, binSubs) + + expectedAuditEnv := fmt.Sprintf("cat \"/proc/$(/bin/ps -C %s -o pid= | tr -d ' ')/environ\" | tr '\\0' '\\n'", binSubs[0]) + assert.Equal(t, expectedAuditEnv, controls.Groups[1].Checks[0].AuditEnv) +} + +func TestGetSummaryTotals(t *testing.T) { + controlsCollection, err := parseControlsJsonFile("./testdata/controlsCollection.json") + if err != nil { + t.Error(err) + } + + resultTotals := getSummaryTotals(controlsCollection) + assert.Equal(t, 12, resultTotals.Fail) + assert.Equal(t, 14, resultTotals.Warn) + assert.Equal(t, 0, resultTotals.Info) + assert.Equal(t, 49, resultTotals.Pass) +} + +func TestPrintSummary(t *testing.T) { + controlsCollection, err := parseControlsJsonFile("./testdata/controlsCollection.json") + if err != nil { + t.Error(err) + } + + resultTotals := getSummaryTotals(controlsCollection) + rescueStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + printSummary(resultTotals, "totals") + w.Close() + out, _ := ioutil.ReadAll(r) + os.Stdout = rescueStdout + + assert.Contains(t, string(out), "49 checks PASS\n12 checks FAIL\n14 checks WARN\n0 checks INFO\n\n") +} + +func TestPrettyPrintNoSummary(t *testing.T) { + controlsCollection, err := parseControlsJsonFile("./testdata/controlsCollection.json") + if err != nil { + t.Error(err) + } + + resultTotals := getSummaryTotals(controlsCollection) + rescueStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + noSummary = true + prettyPrint(controlsCollection[0], resultTotals) + w.Close() + out, _ := ioutil.ReadAll(r) + os.Stdout = rescueStdout + + assert.NotContains(t, string(out), "49 checks PASS") +} + +func TestPrettyPrintSummary(t *testing.T) { + controlsCollection, err := parseControlsJsonFile("./testdata/controlsCollection.json") + if err != nil { + t.Error(err) + } + + resultTotals := getSummaryTotals(controlsCollection) + rescueStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + noSummary = false + prettyPrint(controlsCollection[0], resultTotals) + w.Close() + out, _ := ioutil.ReadAll(r) + os.Stdout = rescueStdout + + assert.Contains(t, string(out), "49 checks PASS") +} + +func TestWriteStdoutOutputNoTotal(t *testing.T) { + controlsCollection, err := parseControlsJsonFile("./testdata/controlsCollection.json") + if err != nil { + t.Error(err) + } + + rescueStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + noTotals = true + writeStdoutOutput(controlsCollection) + w.Close() + out, _ := ioutil.ReadAll(r) + os.Stdout = rescueStdout + + assert.NotContains(t, string(out), "49 checks PASS") +} + +func TestWriteStdoutOutputTotal(t *testing.T) { + controlsCollection, err := parseControlsJsonFile("./testdata/controlsCollection.json") + if err != nil { + t.Error(err) + } + + rescueStdout := os.Stdout + + r, w, _ := os.Pipe() + + os.Stdout = w + noTotals = false + writeStdoutOutput(controlsCollection) + w.Close() + out, _ := ioutil.ReadAll(r) + + os.Stdout = rescueStdout + + assert.Contains(t, string(out), "49 checks PASS") +} + +func parseControlsJsonFile(filepath string) ([]*check2.Controls, error) { + var result 
[]*check2.Controls + + d, err := ioutil.ReadFile(filepath) + if err != nil { + return nil, err + } + err = json.Unmarshal(d, &result) + if err != nil { + return nil, err + } + + return result, nil +} + +func parseResultJsonFile(filepath string) (JsonOutputFormat, error) { + var result JsonOutputFormat + + d, err := ioutil.ReadFile(filepath) + if err != nil { + return result, err + } + err = json.Unmarshal(d, &result) + if err != nil { + return result, err + } + + return result, nil +} + +func parseResultNoTotalsJsonFile(filepath string) ([]*check2.Controls, error) { + var result []*check2.Controls + + d, err := ioutil.ReadFile(filepath) + if err != nil { + return nil, err + } + err = json.Unmarshal(d, &result) + if err != nil { + return nil, err + } + + return result, nil +} + +func loadConfigForTest() (*viper.Viper, error) { + viperWithData := viper.New() + viperWithData.SetConfigFile("./kubebench-rules/config.yaml") + if err := viperWithData.ReadInConfig(); err != nil { + return nil, err + } + return viperWithData, nil +} + +type restoreFn func() + +func fakeExecutableInPath(execFile, execCode string) (restoreFn, error) { + pathenv := os.Getenv("PATH") + tmp, err := ioutil.TempDir("", "TestfakeExecutableInPath") + if err != nil { + return nil, err + } + + wd, err := os.Getwd() + if err != nil { + return nil, err + } + + if len(execCode) > 0 { + ioutil.WriteFile(filepath.Join(tmp, execFile), []byte(execCode), 0700) + } else { + f, err := os.OpenFile(execFile, os.O_CREATE|os.O_EXCL, 0700) + if err != nil { + return nil, err + } + err = f.Close() + if err != nil { + return nil, err + } + } + + err = os.Setenv("PATH", fmt.Sprintf("%s:%s", tmp, pathenv)) + if err != nil { + return nil, err + } + + restorePath := func() { + os.RemoveAll(tmp) + os.Chdir(wd) + os.Setenv("PATH", pathenv) + } + + return restorePath, nil +} + +func prunePath() (restoreFn, error) { + pathenv := os.Getenv("PATH") + err := os.Setenv("PATH", "") + if err != nil { + return nil, err + } + restorePath := func() { + os.Setenv("PATH", pathenv) + } + return restorePath, nil +} diff --git a/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/config.yaml b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/config.yaml new file mode 100644 index 00000000..b7839455 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/controlplane.yaml b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/controlplane.yaml new file mode 100644 index 00000000..49aa9c72 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/controlplane.yaml @@ -0,0 +1,31 @@ +--- +controls: +version: "aks-1.3" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit Logs" + type: "manual" + remediation: | + Azure audit logs are enabled and managed in the Azure portal. To enable log collection for + the Kubernetes master components in your AKS cluster, open the Azure portal in a web + browser and complete the following steps: + 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't + select the resource group that contains your individual AKS cluster resources, such + as MC_myResourceGroup_myAKSCluster_eastus. + 2. On the left-hand side, choose Diagnostic settings. + 3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting. + 4. 
Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics. + 5. Select an existing workspace or create a new one. If you create a workspace, provide + a workspace name, a resource group, and a location. + 6. In the list of available logs, select the logs you wish to enable. For this example, + enable the kube-audit and kube-audit-admin logs. Common logs include the kube- + apiserver, kube-controller-manager, and kube-scheduler. You can return and change + the collected logs once Log Analytics workspaces are enabled. + 7. When ready, select Save to enable collection of the selected logs. + scored: false diff --git a/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/managedservices.yaml b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/managedservices.yaml new file mode 100644 index 00000000..bae78ab4 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/managedservices.yaml @@ -0,0 +1,144 @@ +--- +controls: +version: "aks-1.3" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.1.2 + text: "Minimize user access to Azure Container Registry (ACR) (Manual)" + type: "manual" + remediation: | + Azure Container Registry + If you use Azure Container Registry (ACR) as your container image store, you need to grant + permissions to the service principal for your AKS cluster to read and pull images. Currently, + the recommended configuration is to use the az aks create or az aks update command to + integrate with a registry and assign the appropriate role for the service principal. For + detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes + Service. + To avoid needing an Owner or Azure account administrator role, you can configure a + service principal manually or use an existing service principal to authenticate ACR from + AKS. For more information, see ACR authentication with service principals or Authenticate + from Kubernetes with a pull secret. + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Azure Container Registry (ACR) (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.2 + text: "Access and identity options for Azure Kubernetes Service (AKS)" + checks: + - id: 5.2.1 + text: "Prefer using dedicated AKS Service Accounts (Manual)" + type: "manual" + remediation: | + Azure Active Directory integration + The security of AKS clusters can be enhanced with the integration of Azure Active Directory + (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, + cloud-based directory, and identity management service that combines core directory + services, application access management, and identity protection. With Azure AD, you can + integrate on-premises identities into AKS clusters to provide a single source for account + management and security. + Azure Active Directory integration with AKS clusters + With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes + resources within a namespace or across the cluster. 
To obtain a kubectl configuration + context, a user can run the az aks get-credentials command. When a user then interacts + with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD + credentials. This approach provides a single source for user account management and + password credentials. The user can only access the resources as defined by the cluster + administrator. + Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect + is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID + Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, + Webhook Token Authentication is used to verify authentication tokens. Webhook token + authentication is configured and managed as part of the AKS cluster. + scored: false + + - id: 5.3 + text: "Key Management Service (KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4 + text: "Cluster Networking" + checks: + - id: 5.4.1 + text: "Restrict Access to the Control Plane Endpoint (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.2 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.3 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.4 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.5 + text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + + - id: 5.5 + text: "Authentication and Authorization" + checks: + - id: 5.5.1 + text: "Manage Kubernetes RBAC users with Azure AD (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + - id: 5.5.2 + text: "Use Azure RBAC for Kubernetes Authorization (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.6 + text: "Other Cluster Configurations" + checks: + - id: 5.6.1 + text: "Restrict untrusted workloads (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + - id: 5.6.2 + text: "Hostile multi-tenant workloads (Manual)" + type: "manual" + remediation: "No remediation" + scored: false diff --git a/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/master.yaml b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/master.yaml new file mode 100644 index 00000000..74330d4b --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "aks-1.3" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/node.yaml b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/node.yaml new file mode 100644 index 00000000..b4442432 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/node.yaml @@ -0,0 +1,298 @@ +--- +controls: +version: "aks-1.3" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c 
permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: false + + - id: 3.1.2 + text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: false + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: false + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. 
+ # To check if the property is set as expected, we need to parse the kubelet command
+ # instead of reading the Kubelet Configuration file.
+ audit: "/bin/ps -fC $kubeletbin "
+ tests:
+ test_items:
+ - flag: --hostname-override
+ set: false
+ remediation: |
+ Edit the kubelet service file $kubeletsvc
+ on each worker node and remove the --hostname-override argument from the
+ KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
+
+ - id: 3.2.8
+ text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --event-qps
+ path: '{.eventRecordQPS}'
+ set: true
+ compare:
+ op: eq
+ value: 0
+ remediation: |
+ If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 3.2.9
+ text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --rotate-certificates
+ path: '{.rotateCertificates}'
+ set: true
+ compare:
+ op: eq
+ value: true
+ - flag: --rotate-certificates
+ path: '{.rotateCertificates}'
+ set: false
+ bin_op: or
+ remediation: |
+ If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+ remove it altogether to use the default value.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+ variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 3.2.10
+ text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: RotateKubeletServerCertificate
+ path: '{.featureGates.RotateKubeletServerCertificate}'
+ set: true
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ Edit the kubelet service file $kubeletsvc
+ on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+ --feature-gates=RotateKubeletServerCertificate=true
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
diff --git a/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/policies.yaml b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/policies.yaml
new file mode 100644
index 00000000..22c61e8e
--- /dev/null
+++ b/cmd/kvisor/kubebench/kubebench-rules/aks-1.3/policies.yaml
@@ -0,0 +1,206 @@
+---
+controls:
+version: "aks-1.3"
+id: 4
+text: "Policies"
+type: "policies"
+groups:
+ - id: 4.1
+ text: "RBAC and Service Accounts"
+ checks:
+ - id: 4.1.1
+ text: "Ensure that the cluster-admin role is only used where required (Manual)"
+ type: "manual"
+ remediation: |
+ Identify all clusterrolebindings to the cluster-admin role.
Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.2 + text: "Pod Security Standards" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of privileged containers. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostPID containers. + scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostIPC containers. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostNetwork containers. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of containers with .spec.allowPrivilegeEscalation set to true. + scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, + ensuring that either MustRunAsNonRoot or MustRunAs + with the range of UIDs not including 0, is set. 
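+ As an illustration only (the namespace name is a placeholder), the built-in
+ Pod Security admission controller can enforce this, since the restricted
+ profile rejects containers that may run as root:
+ kubectl label --overwrite namespace my-app \
+ pod-security.kubernetes.io/enforce=restricted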
+ scored: false
+
+ - id: 4.2.7
+ text: "Minimize the admission of containers with added capabilities (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that allowedCapabilities is not present in policies for the cluster unless
+ it is set to an empty array.
+ scored: false
+
+ - id: 4.2.8
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate consider adding
+ a PSP which forbids the admission of containers which do not drop all capabilities.
+ scored: false
+
+ - id: 4.3
+ text: "Azure Policy / OPA"
+ checks: []
+
+ - id: 4.4
+ text: "CNI Plugin"
+ checks:
+ - id: 4.4.1
+ text: "Ensure that the latest CNI version is used (Manual)"
+ type: "manual"
+ remediation: |
+ Review the documentation of the Azure CNI plugin, and ensure the latest CNI version is used.
+ scored: false
+
+ - id: 4.4.2
+ text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false
+
+ - id: 4.5
+ text: "Secrets Management"
+ checks:
+ - id: 4.5.1
+ text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+ type: "manual"
+ remediation: |
+ If possible, rewrite application code to read secrets from mounted secret files, rather than
+ from environment variables.
+ scored: false
+
+ - id: 4.5.2
+ text: "Consider external secret storage (Manual)"
+ type: "manual"
+ remediation: |
+ Refer to the secrets management options offered by your cloud provider or a third-party
+ secrets management solution.
+ scored: false
+
+ - id: 4.6
+ text: "Extensible Admission Control"
+ checks:
+ - id: 4.6.1
+ text: "Verify that admission controllers are working as expected (Manual)"
+ type: "manual"
+ remediation: "No remediation"
+ scored: false
+
+ - id: 4.7
+ text: "General Policies"
+ checks:
+ - id: 4.7.1
+ text: "Create administrative boundaries between resources using namespaces (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create namespaces for objects in your deployment as you need
+ them.
+ scored: false
+
+ - id: 4.7.2
+ text: "Apply Security Context to Your Pods and Containers (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and apply security contexts to your pods. For a
+ suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+ Containers.
+ scored: false
+
+ - id: 4.7.3
+ text: "The default namespace should not be used (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+ resources and that all new resources are created in a specific namespace.
+ scored: false
diff --git a/cmd/kvisor/kubebench/kubebench-rules/config.yaml b/cmd/kvisor/kubebench/kubebench-rules/config.yaml
new file mode 100644
index 00000000..f4eb3b4f
--- /dev/null
+++ b/cmd/kvisor/kubebench/kubebench-rules/config.yaml
@@ -0,0 +1,402 @@
+---
+## Controls Files.
+# These are YAML files that hold all the details for running checks.
+#
+## Uncomment to use different control file paths.
+# masterControls: ./cfg/master.yaml +# nodeControls: ./cfg/node.yaml + +master: + components: + - apiserver + - scheduler + - controllermanager + - etcd + - flanneld + # kubernetes is a component to cover the config file /etc/kubernetes/config that is referred to in the benchmark + - kubernetes + - kubelet + + kubernetes: + defaultconf: /etc/kubernetes/config + + apiserver: + bins: + - "kube-apiserver" + - "hyperkube apiserver" + - "hyperkube kube-apiserver" + - "apiserver" + - "openshift start master api" + - "hypershift openshift-kube-apiserver" + confs: + - /etc/kubernetes/manifests/kube-apiserver.yaml + - /etc/kubernetes/manifests/kube-apiserver.yml + - /etc/kubernetes/manifests/kube-apiserver.manifest + - /var/snap/kube-apiserver/current/args + - /var/snap/microk8s/current/args/kube-apiserver + - /etc/origin/master/master-config.yaml + - /etc/kubernetes/manifests/talos-kube-apiserver.yaml + defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml + + scheduler: + bins: + - "kube-scheduler" + - "hyperkube scheduler" + - "hyperkube kube-scheduler" + - "scheduler" + - "openshift start master controllers" + confs: + - /etc/kubernetes/manifests/kube-scheduler.yaml + - /etc/kubernetes/manifests/kube-scheduler.yml + - /etc/kubernetes/manifests/kube-scheduler.manifest + - /var/snap/kube-scheduler/current/args + - /var/snap/microk8s/current/args/kube-scheduler + - /etc/origin/master/scheduler.json + - /etc/kubernetes/manifests/talos-kube-scheduler.yaml + defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml + kubeconfig: + - /etc/kubernetes/scheduler.conf + - /var/lib/kube-scheduler/kubeconfig + - /var/lib/kube-scheduler/config.yaml + - /system/secrets/kubernetes/kube-scheduler/kubeconfig + defaultkubeconfig: /etc/kubernetes/scheduler.conf + + controllermanager: + bins: + - "kube-controller-manager" + - "kube-controller" + - "hyperkube controller-manager" + - "hyperkube kube-controller-manager" + - "controller-manager" + - "openshift start master controllers" + - "hypershift openshift-controller-manager" + confs: + - /etc/kubernetes/manifests/kube-controller-manager.yaml + - /etc/kubernetes/manifests/kube-controller-manager.yml + - /etc/kubernetes/manifests/kube-controller-manager.manifest + - /var/snap/kube-controller-manager/current/args + - /var/snap/microk8s/current/args/kube-controller-manager + - /etc/kubernetes/manifests/talos-kube-controller-manager.yaml + defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml + kubeconfig: + - /etc/kubernetes/controller-manager.conf + - /var/lib/kube-controller-manager/kubeconfig + - /system/secrets/kubernetes/kube-controller-manager/kubeconfig + defaultkubeconfig: /etc/kubernetes/controller-manager.conf + + etcd: + optional: true + bins: + - "etcd" + - "openshift start etcd" + datadirs: + - /var/lib/etcd/default.etcd + - /var/lib/etcd/data.etcd + confs: + - /etc/kubernetes/manifests/etcd.yaml + - /etc/kubernetes/manifests/etcd.yml + - /etc/kubernetes/manifests/etcd.manifest + - /etc/etcd/etcd.conf + - /var/snap/etcd/common/etcd.conf.yml + - /var/snap/etcd/common/etcd.conf.yaml + - /var/snap/microk8s/current/args/etcd + - /usr/lib/systemd/system/etcd.service + defaultconf: /etc/kubernetes/manifests/etcd.yaml + defaultdatadir: /var/lib/etcd/default.etcd + + flanneld: + optional: true + bins: + - flanneld + defaultconf: /etc/sysconfig/flanneld + + kubelet: + optional: true + bins: + - "hyperkube kubelet" + - "kubelet" + +node: + components: + - kubelet + - proxy + # kubernetes is a component to cover the config file 
/etc/kubernetes/config that is referred to in the benchmark + - kubernetes + + kubernetes: + defaultconf: "/etc/kubernetes/config" + + kubelet: + cafile: + - "/etc/kubernetes/pki/ca.crt" + - "/etc/kubernetes/certs/ca.crt" + - "/etc/kubernetes/cert/ca.pem" + - "/var/snap/microk8s/current/certs/ca.crt" + svc: + # These paths must also be included + # in the 'confs' property below + - "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + - "/etc/systemd/system/kubelet.service" + - "/lib/systemd/system/kubelet.service" + - "/etc/systemd/system/snap.kubelet.daemon.service" + - "/etc/systemd/system/snap.microk8s.daemon-kubelet.service" + - "/etc/systemd/system/atomic-openshift-node.service" + - "/etc/systemd/system/origin-node.service" + bins: + - "hyperkube kubelet" + - "kubelet" + kubeconfig: + - "/etc/kubernetes/kubelet.conf" + - "/etc/kubernetes/kubelet-kubeconfig.conf" + - "/var/lib/kubelet/kubeconfig" + - "/etc/kubernetes/kubelet-kubeconfig" + - "/etc/kubernetes/kubelet/kubeconfig" + - "/var/snap/microk8s/current/credentials/kubelet.config" + - "/etc/kubernetes/kubeconfig-kubelet" + confs: + - "/home/kubernetes/kubelet-config.yaml" + - "/home/kubernetes/kubelet-config.yml" + - "/etc/kubernetes/kubelet-config.yaml" + - "/var/lib/kubelet/config.yaml" + - "/var/lib/kubelet/config.yml" + - "/etc/kubernetes/kubelet/kubelet-config.json" + - "/etc/kubernetes/kubelet/config" + - "/etc/default/kubeletconfig.json" + - "/etc/default/kubelet" + - "/var/lib/kubelet/kubeconfig" + - "/var/snap/kubelet/current/args" + - "/var/snap/microk8s/current/args/kubelet" + ## Due to the fact that the kubelet might be configured + ## without a kubelet-config file, we use a work-around + ## of pointing to the systemd service file (which can also + ## hold kubelet configuration). 
+ ## Note: The following paths must match the one under 'svc' + - "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + - "/etc/systemd/system/kubelet.service" + - "/lib/systemd/system/kubelet.service" + - "/etc/systemd/system/snap.kubelet.daemon.service" + - "/etc/systemd/system/snap.microk8s.daemon-kubelet.service" + - "/etc/kubernetes/kubelet.yaml" + defaultconf: "/home/kubernetes/kubelet-config.yaml" + defaultsvc: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + defaultkubeconfig: "/etc/kubernetes/kubelet.conf" + defaultcafile: "/etc/kubernetes/pki/ca.crt" + + proxy: + optional: true + bins: + - "kube-proxy" + - "hyperkube proxy" + - "hyperkube kube-proxy" + - "proxy" + - "openshift start network" + confs: + - /etc/kubernetes/proxy + - /etc/kubernetes/addons/kube-proxy-daemonset.yaml + - /etc/kubernetes/addons/kube-proxy-daemonset.yml + - /var/snap/kube-proxy/current/args + - /var/snap/microk8s/current/args/kube-proxy + kubeconfig: + - "/etc/kubernetes/kubelet-kubeconfig" + - "/etc/kubernetes/kubelet-kubeconfig.conf" + - "/etc/kubernetes/kubelet/config" + - "/var/lib/kubelet/kubeconfig" + - "/var/snap/microk8s/current/credentials/proxy.config" + svc: + - "/lib/systemd/system/kube-proxy.service" + - "/etc/systemd/system/snap.microk8s.daemon-proxy.service" + defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml + defaultkubeconfig: "/etc/kubernetes/proxy.conf" + +etcd: + components: + - etcd + + etcd: + bins: + - "etcd" + datadirs: + - /var/lib/etcd/default.etcd + - /var/lib/etcd/data.etcd + confs: + - /etc/kubernetes/manifests/etcd.yaml + - /etc/kubernetes/manifests/etcd.yml + - /etc/kubernetes/manifests/etcd.manifest + - /etc/etcd/etcd.conf + - /var/snap/etcd/common/etcd.conf.yml + - /var/snap/etcd/common/etcd.conf.yaml + - /var/snap/microk8s/current/args/etcd + - /usr/lib/systemd/system/etcd.service + defaultconf: /etc/kubernetes/manifests/etcd.yaml + defaultdatadir: /var/lib/etcd/default.etcd + +controlplane: + components: + - apiserver + + apiserver: + bins: + - "kube-apiserver" + - "hyperkube apiserver" + - "hyperkube kube-apiserver" + - "apiserver" + +policies: + components: [] + +managedservices: + components: [] + +version_mapping: + "1.15": "cis-1.5" + "1.16": "cis-1.6" + "1.17": "cis-1.6" + "1.18": "cis-1.6" + "1.19": "cis-1.20" + "1.20": "cis-1.20" + "1.21": "cis-1.20" + "1.22": "cis-1.23" + "1.23": "cis-1.23" + "1.24": "cis-1.24" + "1.25": "cis-1.7" + "eks-1.0.1": "eks-1.0.1" + "eks-1.1.0": "eks-1.1.0" + "eks-1.2.0": "eks-1.2.0" + "eks-1.3.0": "eks-1.3.0" + "gke-1.0": "gke-1.0" + "gke-1.2.0": "gke-1.2.0" + "gke-1.4.0": "gke-1.4.0" + "ocp-3.10": "rh-0.7" + "ocp-3.11": "rh-0.7" + "ocp-4.0": "rh-1.0" + "aks-1.0": "aks-1.0" + "aks-1.3": "aks-1.3" + "ack-1.0": "ack-1.0" + "cis-1.6-k3s": "cis-1.6-k3s" + "tkgi-1.2.53": "tkgi-1.2.53" + +target_mapping: + "cis-1.5": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.6": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.6-k3s": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.20": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.23": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.24": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.7": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "gke-1.0": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + - "managedservices" + "gke-1.2.0": + - "master" + - "node" 
+ - "controlplane" + - "policies" + - "managedservices" + "gke-1.4.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "eks-1.0.1": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "eks-1.1.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "eks-1.2.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "eks-1.3.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "rh-0.7": + - "master" + - "node" + "aks-1.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "aks-1.3": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "ack-1.0": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + - "managedservices" + "rh-1.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "etcd" + "eks-stig-kubernetes-v1r6": + - "node" + - "controlplane" + - "policies" + - "managedservices" + "tkgi-1.2.53": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" diff --git a/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/config.yaml b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/config.yaml new file mode 100644 index 00000000..17301a75 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +AWS_ACCOUNT: "" +## AWS region is required. +AWS_REGION: "" +## EKS Cluster ARN is required. +CLUSTER_ARN: "" diff --git a/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/controlplane.yaml b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/controlplane.yaml new file mode 100644 index 00000000..97499092 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/controlplane.yaml @@ -0,0 +1,14 @@ +--- +controls: +version: "eks-1.3.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit logs (Automated)" + remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler." 
+ scored: false diff --git a/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/managedservices.yaml b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/managedservices.yaml new file mode 100644 index 00000000..cf41ca24 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/managedservices.yaml @@ -0,0 +1,154 @@ +--- +controls: +version: "eks-1.3.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Automated)" + type: "manual" + remediation: | + To utilize AWS ECR for Image scanning please follow the steps below: + + To create a repository configured for scan on push (AWS CLI): + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + To edit the settings of an existing repository (AWS CLI): + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + Use the following steps to start a manual image scan using the AWS Management Console. + Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. + From the navigation bar, choose the Region to create your repository in. + In the navigation pane, choose Repositories. + On the Repositories page, choose the repository that contains the image to scan. + On the Images page, select the image to scan and then choose Scan. + scored: false + + - id: 5.1.2 + text: "Minimize user access to Amazon ECR (Manual)" + type: "manual" + remediation: | + Before you use IAM to manage access to Amazon ECR, you should understand what IAM features + are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other + AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide. + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Amazon ECR (Manual)" + type: "manual" + remediation: | + You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites. + + The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess + the following IAM policy permissions for Amazon ECR. + + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken" + ], + "Resource": "*" + } + ] + } + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.3 + text: "AWS EKS Key Management Service (KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)" + type: "manual" + remediation: | + This process can only be performed during Cluster Creation. + + Enable 'Secrets Encryption' during Amazon EKS cluster creation as described + in the links within the 'References' section. 
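+
+ As a sketch (cluster name, region and key ARN are placeholders; other
+ required create-cluster flags are omitted), envelope encryption of Secrets
+ can be enabled at creation time with:
+ aws eks create-cluster --name my-cluster --region us-east-1 ... \
+ --encryption-config '[{"resources":["secrets"],"provider":{"keyArn":"arn:aws:kms:us-east-1:111122223333:key/example"}}]'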
+ scored: false
+
+ - id: 5.4
+ text: "Cluster Networking"
+ checks:
+ - id: 5.4.1
+ text: "Restrict Access to the Control Plane Endpoint (Automated)"
+ type: "manual"
+ remediation: "No remediation"
+ scored: false
+
+ - id: 5.4.2
+ text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Automated)"
+ type: "manual"
+ remediation: "No remediation"
+ scored: false
+
+ - id: 5.4.3
+ text: "Ensure clusters are created with Private Nodes (Automated)"
+ type: "manual"
+ remediation: "No remediation"
+ scored: false
+
+ - id: 5.4.4
+ text: "Ensure Network Policy is Enabled and set as appropriate (Automated)"
+ type: "manual"
+ remediation: "No remediation"
+ scored: false
+
+ - id: 5.4.5
+ text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
+ type: "manual"
+ remediation: "No remediation"
+ scored: false
+
+ - id: 5.5
+ text: "Authentication and Authorization"
+ checks:
+ - id: 5.5.1
+ text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater (Manual)"
+ type: "manual"
+ remediation: |
+ Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation.
+ scored: false
+
+ - id: 5.6
+ text: "Other Cluster Configurations"
+ checks:
+ - id: 5.6.1
+ text: "Consider Fargate for running untrusted workloads (Manual)"
+ type: "manual"
+ remediation: |
+ Create a Fargate profile for your cluster. Before you can schedule pods running on Fargate
+ in your cluster, you must define a Fargate profile that specifies which pods should use
+ Fargate when they are launched. For more information, see AWS Fargate profile.
+
+ Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has
+ already been created for your cluster with selectors for all pods in the kube-system
+ and default namespaces. Use the following procedure to create Fargate profiles for
+ any other namespaces you would like to use with Fargate.
+ scored: false
diff --git a/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/master.yaml b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/master.yaml
new file mode 100644
index 00000000..3e8a0179
--- /dev/null
+++ b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "eks-1.3.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/node.yaml b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/node.yaml
new file mode 100644
index 00000000..404060f8
--- /dev/null
+++ b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/node.yaml
@@ -0,0 +1,307 @@
+---
+controls:
+version: "eks-1.3.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+ - id: 3.1
+ text: "Worker Node Configuration Files"
+ checks:
+ - id: 3.1.1
+ text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)"
+ audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "644"
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chmod 644 $kubeletkubeconfig
+ scored: false
+
+ - id: 3.1.2
+ text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)"
+ audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chown root:root $kubeletkubeconfig
+ scored: false
+
+ - id: 3.1.3
+ text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)"
+ audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "644"
+ remediation: |
+ Run the following command (using the config file location identified in the Audit step)
+ chmod 644 $kubeletconf
+ scored: false
+
+ - id: 3.1.4
+ text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)"
+ audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Run the following command (using the config file location identified in the Audit step)
+ chown root:root $kubeletconf
+ scored: false
+
+ - id: 3.2
+ text: "Kubelet"
+ checks:
+ - id: 3.2.1
+ text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: "--anonymous-auth"
+ path: '{.authentication.anonymous.enabled}'
+ set: true
+ compare:
+ op: eq
+ value: false
+ remediation: |
+ If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+ false.
+ If using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --anonymous-auth=false
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 3.2.2
+ text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --authorization-mode
+ path: '{.authorization.mode}'
+ set: true
+ compare:
+ op: nothave
+ value: AlwaysAllow
+ remediation: |
+ If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
+ using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_AUTHZ_ARGS variable.
+ --authorization-mode=Webhook
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 3.2.3
+ text: "Ensure that a Client CA File is Configured (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --client-ca-file
+ path: '{.authentication.x509.clientCAFile}'
+ set: true
+ remediation: |
+ If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
+ the location of the client CA file.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_AUTHZ_ARGS variable.
+ --client-ca-file=
+ Based on your system, restart the kubelet service.
For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 3.2.4
+ text: "Ensure that the --read-only-port is disabled (Manual)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: "--read-only-port"
+ path: '{.readOnlyPort}'
+ set: true
+ compare:
+ op: eq
+ value: 0
+ remediation: |
+ If using a Kubelet config file, edit the file to set readOnlyPort to 0.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --read-only-port=0
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
+
+ - id: 3.2.5
+ text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --streaming-connection-idle-timeout
+ path: '{.streamingConnectionIdleTimeout}'
+ set: true
+ compare:
+ op: noteq
+ value: 0
+ - flag: --streaming-connection-idle-timeout
+ path: '{.streamingConnectionIdleTimeout}'
+ set: false
+ bin_op: or
+ remediation: |
+ If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
+ value other than 0.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --streaming-connection-idle-timeout=5m
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 3.2.6
+ text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --make-iptables-util-chains
+ path: '{.makeIPTablesUtilChains}'
+ set: true
+ compare:
+ op: eq
+ value: true
+ - flag: --make-iptables-util-chains
+ path: '{.makeIPTablesUtilChains}'
+ set: false
+ bin_op: or
+ remediation: |
+ If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ remove the --make-iptables-util-chains argument from the
+ KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 3.2.7
+ text: "Ensure that the --hostname-override argument is not set (Manual)"
+ # This is one of those properties that can only be set as a command line argument.
+ # To check if the property is set as expected, we need to parse the kubelet command
+ # instead of reading the Kubelet Configuration file.
+ audit: "/bin/ps -fC $kubeletbin "
+ tests:
+ test_items:
+ - flag: --hostname-override
+ set: false
+ remediation: |
+ Edit the kubelet service file $kubeletsvc
+ on each worker node and remove the --hostname-override argument from the
+ KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.8 + text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: gte + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.9 + text: "Ensure that the --rotate-certificates argument is not present or is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: true + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.10 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: 3.3 + text: "Container Optimized OS" + checks: + - id: 3.3.1 + text: "Prefer using a container-optimized OS when possible (Manual)" + remediation: "No remediation" + scored: false diff --git a/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/policies.yaml b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/policies.yaml new file mode 100644 index 00000000..fca60241 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/eks-1.3.0/policies.yaml @@ -0,0 +1,208 @@ +--- +controls: +version: "eks-1.3.0" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. 
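+ For example, the bindings that grant cluster-admin can be listed with kubectl
+ (jq is assumed to be available on the admin workstation):
+ kubectl get clusterrolebindings -o json \
+ | jq -r '.items[] | select(.roleRef.name=="cluster-admin") | .metadata.name'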
+ Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 4.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 4.2 + text: "Pod Security Standards" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of privileged containers. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostPID containers. + scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostIPC containers. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostNetwork containers. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of containers with .spec.allowPrivilegeEscalation set to true. 
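+ As a rough audit (only fields that are explicitly set will show up), existing
+ pods can be inspected with:
+ kubectl get pods -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.containers[*].securityContext.allowPrivilegeEscalation}{"\n"}{end}'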
+ scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring + that either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0, is set. + scored: false + + - id: 4.2.7 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 4.2.8 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate consider adding + a policy which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 4.3 + text: "CNI Plugin" + checks: + - id: 4.3.1 + text: "Ensure CNI plugin supports network policies (Manual)" + type: "manual" + remediation: | + As with RBAC policies, network policies should adhere to the policy of least privileged + access. Start by creating a deny all policy that restricts all inbound and outbound traffic + from a namespace or create a global policy using Calico. + scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "General Policies" + checks: + - id: 4.5.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.5.2 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.5.3 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. 
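+ For example, workloads still living in the default namespace can be found with
+ (note that kubectl get all does not cover every resource type):
+ kubectl get all -n default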
+ scored: false diff --git a/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/config.yaml b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/config.yaml new file mode 100644 index 00000000..b7839455 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/controlplane.yaml b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/controlplane.yaml new file mode 100644 index 00000000..65da37b3 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/controlplane.yaml @@ -0,0 +1,35 @@ +--- +controls: +version: "gke-1.4.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Authentication and Authorization" + checks: + - id: 2.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + You can remediate the availability of client certificates in your GKE cluster. See + Recommendation 6.8.2. + scored: false + + - id: 2.2 + text: "Logging" + type: skip + checks: + - id: 2.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + type: "manual" + remediation: "This control cannot be modified in GKE." + scored: false + + - id: 2.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: "This control cannot be modified in GKE." + scored: false diff --git a/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/managedservices.yaml b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/managedservices.yaml new file mode 100644 index 00000000..a34d2845 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/managedservices.yaml @@ -0,0 +1,706 @@ +--- +controls: +version: "gke-1.4.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using GCR Container Analysis + or a third-party provider (Automated)" + type: "manual" + remediation: | + Using Command Line: + + gcloud services enable containerscanning.googleapis.com + scored: false + + - id: 5.1.2 + text: "Minimize user access to GCR (Manual)" + type: "manual" + remediation: | + Using Command Line: + To change roles at the GCR bucket level: + Firstly, run the following if read permissions are required: + + gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer + gs://artifacts.[PROJECT_ID].appspot.com + + Then remove the excessively privileged role (Storage Admin / Storage Object Admin / + Storage Object Creator) using: + + gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] + gs://artifacts.[PROJECT_ID].appspot.com + + where: + [TYPE] can be one of the following: + o user, if the [EMAIL-ADDRESS] is a Google account + o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account + [EMAIL-ADDRESS] can be one of the following: + o a Google account (for example, someone@example.com) + o a Cloud IAM service account + To modify roles defined at the project level and subsequently inherited within the GCR + bucket, or the Service Account User role, extract the IAM policy file, modify it accordingly + and apply it using: + + gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE] + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for 
GCR (Manual)" + type: "manual" + remediation: | + Using Command Line: + For an account explicitly granted to the bucket. First, add read access to the Kubernetes + Service Account + + gsutil iam ch [TYPE]:[EMAIL-ADDRESS]:objectViewer + gs://artifacts.[PROJECT_ID].appspot.com + + where: + [TYPE] can be one of the following: + o user, if the [EMAIL-ADDRESS] is a Google account + o serviceAccount, if [EMAIL-ADDRESS] specifies a Service account + [EMAIL-ADDRESS] can be one of the following: + o a Google account (for example, someone@example.com) + o a Cloud IAM service account + + Then remove the excessively privileged role (Storage Admin / Storage Object Admin / + Storage Object Creator) using: + + gsutil iam ch -d [TYPE]:[EMAIL-ADDRESS]:[ROLE] + gs://artifacts.[PROJECT_ID].appspot.com + + For an account that inherits access to the GCR Bucket through Project level permissions, + modify the Projects IAM policy file accordingly, then upload it using: + + gcloud projects set-iam-policy [PROJECT_ID] [POLICY_FILE] + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: | + Using Command Line: + First, update the cluster to enable Binary Authorization: + + gcloud container cluster update [CLUSTER_NAME] \ + --enable-binauthz + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference + (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for guidance. + Import the policy file into Binary Authorization: + + gcloud container binauthz policy import [YAML_POLICY] + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Ensure GKE clusters are not running using the Compute Engine + default service account (Automated)" + type: "manual" + remediation: | + Using Command Line: + Firstly, create a minimally privileged service account: + + gcloud iam service-accounts create [SA_NAME] \ + --display-name "GKE Node Service Account" + export NODE_SA_EMAIL=`gcloud iam service-accounts list \ + --format='value(email)' \ + --filter='displayName:GKE Node Service Account'` + + Grant the following roles to the service account: + + export PROJECT_ID=`gcloud config get-value project` + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding $PROJECT_ID \ + --member serviceAccount:$NODE_SA_EMAIL \ + --role roles/logging.logWriter + + To create a new Node pool using the Service account, run the following command: + + gcloud container node-pools create [NODE_POOL] \ + --service-account=[SA_NAME]@[PROJECT_ID].iam.gserviceaccount.com \ + --cluster=[CLUSTER_NAME] --zone [COMPUTE_ZONE] + + You will need to migrate your workloads to the new Node pool, and delete Node pools that + use the default service account to complete the remediation. + scored: false + + - id: 5.2.2 + text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)" + type: "manual" + remediation: | + Using Command Line: + + gcloud beta container clusters update [CLUSTER_NAME] --zone [CLUSTER_ZONE] \ + --identity-namespace=[PROJECT_ID].svc.id.goog + + Note that existing Node pools are unaffected. New Node pools default to --workload- + metadata-from-node=GKE_METADATA_SERVER . 
+
+ Then, modify existing Node pools to enable GKE_METADATA_SERVER:
+
+ gcloud beta container node-pools update [NODEPOOL_NAME] \
+ --cluster=[CLUSTER_NAME] --zone [CLUSTER_ZONE] \
+ --workload-metadata-from-node=GKE_METADATA_SERVER
+
+ You may also need to modify workloads in order for them to use Workload Identity as
+ described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
+ Also consider the effects on the availability of your hosted workloads as Node
+ pools are updated; it may be more appropriate to create new Node Pools.
+ scored: false
+
+ - id: 5.3
+ text: "Cloud Key Management Service (Cloud KMS)"
+ checks:
+ - id: 5.3.1
+ text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To create a key:
+
+ Create a key ring:
+
+ gcloud kms keyrings create [RING_NAME] \
+ --location [LOCATION] \
+ --project [KEY_PROJECT_ID]
+
+ Create a key:
+
+ gcloud kms keys create [KEY_NAME] \
+ --location [LOCATION] \
+ --keyring [RING_NAME] \
+ --purpose encryption \
+ --project [KEY_PROJECT_ID]
+
+ Grant the Kubernetes Engine Service Agent service account the Cloud KMS CryptoKey
+ Encrypter/Decrypter role:
+
+ gcloud kms keys add-iam-policy-binding [KEY_NAME] \
+ --location [LOCATION] \
+ --keyring [RING_NAME] \
+ --member serviceAccount:[SERVICE_ACCOUNT_NAME] \
+ --role roles/cloudkms.cryptoKeyEncrypterDecrypter \
+ --project [KEY_PROJECT_ID]
+
+ To create a new cluster with Application-layer Secrets Encryption:
+
+ gcloud container clusters create [CLUSTER_NAME] \
+ --cluster-version=latest \
+ --zone [ZONE] \
+ --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] \
+ --project [CLUSTER_PROJECT_ID]
+
+ To enable on an existing cluster:
+
+ gcloud container clusters update [CLUSTER_NAME] \
+ --zone [ZONE] \
+ --database-encryption-key projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] \
+ --project [CLUSTER_PROJECT_ID]
+ scored: false
+
+ - id: 5.4
+ text: "Node Metadata"
+ checks:
+ - id: 5.4.1
+ text: "Ensure legacy Compute Engine instance metadata APIs are Disabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To update an existing cluster, create a new Node pool with the legacy GCE metadata
+ endpoint disabled:
+
+ gcloud container node-pools create [POOL_NAME] \
+ --metadata disable-legacy-endpoints=true \
+ --cluster [CLUSTER_NAME] \
+ --zone [COMPUTE_ZONE]
+
+ You will need to migrate workloads from any existing non-conforming Node pools, to the
+ new Node pool, then delete non-conforming Node pools to complete the remediation.
+ scored: false
+
+ - id: 5.4.2
+ text: "Ensure the GKE Metadata Server is Enabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ gcloud beta container clusters update [CLUSTER_NAME] \
+ --identity-namespace=[PROJECT_ID].svc.id.goog
+ Note that existing Node pools are unaffected. New Node pools default to
+ --workload-metadata-from-node=GKE_METADATA_SERVER.
+
+ To modify an existing Node pool to enable GKE Metadata Server:
+
+ gcloud beta container node-pools update [NODEPOOL_NAME] \
+ --cluster=[CLUSTER_NAME] \
+ --workload-metadata-from-node=GKE_METADATA_SERVER
+
+ You may also need to modify workloads in order for them to use Workload Identity as
+ described within https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
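+
+ The current setting can be verified per node pool with something like the
+ following (pool, cluster and zone names are placeholders):
+ gcloud container node-pools describe my-pool --cluster my-cluster \
+ --zone us-central1-a --format='value(config.workloadMetadataConfig)'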
+ + - id: 5.5 + text: "Node Configuration and Maintenance" + checks: + - id: 5.5.1 + text: "Ensure Container-Optimized OS (cos_containerd) is used for GKE node images (Automated)" + type: "manual" + remediation: | + Using Command Line: + To set the node image to cos_containerd for an existing cluster's Node pool: + + gcloud container clusters upgrade [CLUSTER_NAME] \ + --image-type cos_containerd \ + --zone [COMPUTE_ZONE] --node-pool [POOL_NAME] + scored: false + + - id: 5.5.2 + text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable node auto-repair for an existing cluster with Node pool, run the following + command: + + gcloud container node-pools update [POOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --enable-autorepair + scored: false + + - id: 5.5.3 + text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable node auto-upgrade for an existing cluster's Node pool, run the following + command: + + gcloud container node-pools update [NODE_POOL] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --enable-autoupgrade + scored: false + + - id: 5.5.4 + text: "When creating New Clusters - Automate GKE version management using Release Channels (Manual)" + type: "manual" + remediation: | + Using Command Line: + Create a new cluster by running the following command: + + gcloud beta container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --release-channel [RELEASE_CHANNEL] + + where [RELEASE_CHANNEL] is stable or regular according to your needs. + scored: false + + - id: 5.5.5 + text: "Ensure Shielded GKE Nodes are Enabled (Manual)" + type: "manual" + remediation: | + Using Command Line: + To migrate an existing cluster, specify the --enable-shielded-nodes flag in a + cluster update command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-shielded-nodes + scored: false + + - id: 5.5.6 + text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Integrity Monitoring enabled, run the + following command: + + gcloud beta container node-pools create [NODEPOOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --shielded-integrity-monitoring + + You will also need to migrate workloads from existing non-conforming Node pools to the newly created Node pool, + then delete the non-conforming pools. + scored: false + + - id: 5.5.7 + text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Secure Boot enabled, run the following + command: + + gcloud beta container node-pools create [NODEPOOL_NAME] \ + --cluster [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --shielded-secure-boot + + You will also need to migrate workloads from existing non-conforming Node pools to the + newly created Node pool, then delete the non-conforming pools.
+ scored: false + + - id: 5.6 + text: "Cluster Networking" + checks: + - id: 5.6.1 + text: "Enable VPC Flow Logs and Intranode Visibility (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable intranode visibility on an existing cluster, run the following command: + + gcloud beta container clusters update [CLUSTER_NAME] \ + --enable-intra-node-visibility + scored: false + + - id: 5.6.2 + text: "Ensure use of VPC-native clusters (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable Alias IP on a new cluster, run the following command: + + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-ip-alias + scored: false + + - id: 5.6.3 + text: "Ensure Master Authorized Networks is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To check Master Authorized Networks status for an existing cluster, run the following + command: + + gcloud container clusters describe [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --format json | jq '.masterAuthorizedNetworksConfig' + + If Master Authorized Networks is enabled, the output is + + { + "enabled": true + } + + If Master Authorized Networks is disabled, the above command returns null ( { } ). + scored: false + + - id: 5.6.4 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a cluster with a Private Endpoint enabled and Public Access disabled by including + the --enable-private-endpoint flag within the cluster create command: + + gcloud container clusters create [CLUSTER_NAME] \ + --enable-private-endpoint + + Setting this flag also requires the setting of --enable-private-nodes, --enable-ip-alias, + and --master-ipv4-cidr=[MASTER_CIDR_RANGE]. + scored: false + + - id: 5.6.5 + text: "Ensure clusters are created with Private Nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a cluster with Private Nodes enabled, include the --enable-private-nodes flag + within the cluster create command: + + gcloud container clusters create [CLUSTER_NAME] \ + --enable-private-nodes + + Setting this flag also requires the setting of --enable-ip-alias and + --master-ipv4-cidr=[MASTER_CIDR_RANGE]. + scored: false + + - id: 5.6.6 + text: "Consider firewalling GKE worker nodes (Manual)" + type: "manual" + remediation: | + Using Command Line: + Use the following command to generate firewall rules, setting the variables as appropriate. + You may want to use the target [TAG] and [SERVICE_ACCOUNT] previously identified.
+ + gcloud compute firewall-rules create [FIREWALL_RULE_NAME] \ + --network [NETWORK] \ + --priority [PRIORITY] \ + --direction [DIRECTION] \ + --action [ACTION] \ + --target-tags [TAG] \ + --target-service-accounts [SERVICE_ACCOUNT] \ + --source-ranges [SOURCE_CIDR_RANGE] \ + --source-tags [SOURCE_TAGS] \ + --source-service-accounts=[SOURCE_SERVICE_ACCOUNT] \ + --destination-ranges [DESTINATION_CIDR_RANGE] \ + --rules [RULES] + scored: false + + - id: 5.6.7 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: | + Using Command Line: + To enable Network Policy for an existing cluster, first enable the Network Policy add-on: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --update-addons NetworkPolicy=ENABLED + + Then, enable Network Policy: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-network-policy + scored: false + + - id: 5.6.8 + text: "Ensure use of Google-managed SSL Certificates (Manual)" + type: "manual" + remediation: | + If services of type:LoadBalancer are discovered, consider replacing the Service with an + Ingress. + + To configure the Ingress and use Google-managed SSL certificates, follow the instructions + as listed at https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs. + scored: false + + - id: 5.7 + text: "Logging" + checks: + - id: 5.7.1 + text: "Ensure Stackdriver Kubernetes Logging and Monitoring is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + + STACKDRIVER KUBERNETES ENGINE MONITORING SUPPORT (PREFERRED): + To enable Stackdriver Kubernetes Engine Monitoring for an existing cluster, run the + following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-stackdriver-kubernetes + + LEGACY STACKDRIVER SUPPORT: + Both Logging and Monitoring support must be enabled. + To enable Legacy Stackdriver Logging for an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --logging-service logging.googleapis.com + + To enable Legacy Stackdriver Monitoring for an existing cluster, run the following + command: + + gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] \ + --monitoring-service monitoring.googleapis.com + scored: false + + - id: 5.7.2 + text: "Enable Linux auditd logging (Manual)" + type: "manual" + remediation: | + Using Command Line: + Download the example manifests: + + curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml \ + > cos-auditd-logging.yaml + + Edit the example manifests if needed. Then, deploy them: + + kubectl apply -f cos-auditd-logging.yaml + + Verify that the logging Pods have started.
If you defined a different Namespace in your + manifests, replace cos-auditd with the name of the namespace you're using: + + kubectl get pods --namespace=cos-auditd + scored: false + + - id: 5.8 + text: "Authentication and Authorization" + checks: + - id: 5.8.1 + text: "Ensure Basic Authentication using static passwords is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To update an existing cluster and disable Basic Authentication by removing the static + password: + + gcloud container clusters update [CLUSTER_NAME] \ + --no-enable-basic-auth + scored: false + + - id: 5.8.2 + text: "Ensure authentication using Client Certificates is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a new cluster without a Client Certificate: + + gcloud container clusters create [CLUSTER_NAME] \ + --no-issue-client-certificate + scored: false + + - id: 5.8.3 + text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)" + type: "manual" + remediation: | + Using Command Line: + Follow the G Suite Groups instructions at + https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke. + + Then, create a cluster with: + + gcloud beta container clusters create my-cluster \ + --security-group="gke-security-groups@[yourdomain.com]" + + Finally, create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that + reference your G Suite Groups. + scored: false + + - id: 5.8.4 + text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To disable Legacy Authorization for an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --no-enable-legacy-authorization + scored: false + + - id: 5.9 + text: "Storage" + checks: + - id: 5.9.1 + text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Manual)" + type: "manual" + remediation: | + Using Command Line: + FOR NODE BOOT DISKS: + Create a new node pool using customer-managed encryption keys for the node boot disk, of + [DISK_TYPE] either pd-standard or pd-ssd: + + gcloud beta container node-pools create [NODE_POOL_NAME] \ + --disk-type [DISK_TYPE] \ + --boot-disk-kms-key \ + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] + + Create a cluster using customer-managed encryption keys for the node boot disk, of + [DISK_TYPE] either pd-standard or pd-ssd: + + gcloud beta container clusters create [CLUSTER_NAME] \ + --disk-type [DISK_TYPE] \ + --boot-disk-kms-key \ + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME] + + FOR ATTACHED DISKS: + Follow the instructions detailed at + https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek.
+ scored: false + + - id: 5.10 + text: "Other Cluster Configurations" + checks: + - id: 5.10.1 + text: "Ensure Kubernetes Web UI is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To disable the Kubernetes Dashboard on an existing cluster, run the following command: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [ZONE] \ + --update-addons=KubernetesDashboard=DISABLED + scored: false + + - id: 5.10.2 + text: "Ensure that Alpha clusters are not used for production workloads (Automated)" + type: "manual" + remediation: | + Using Command Line: + When creating a new cluster: + + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] + + Do not use the --enable-kubernetes-alpha argument. + scored: false + + - id: 5.10.3 + text: "Ensure Pod Security Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: | + Using Command Line: + To enable Pod Security Policy for an existing cluster, run the following command: + + gcloud beta container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] \ + --enable-pod-security-policy + scored: false + + - id: 5.10.4 + text: "Consider GKE Sandbox for running untrusted workloads (Manual)" + type: "manual" + remediation: | + Using Command Line: + To enable GKE Sandbox on an existing cluster, a new Node pool must be created. + + gcloud container node-pools create [NODE_POOL_NAME] \ + --zone=[COMPUTE-ZONE] \ + --cluster=[CLUSTER_NAME] \ + --image-type=cos_containerd \ + --sandbox type=gvisor + scored: false + + - id: 5.10.5 + text: "Ensure use of Binary Authorization (Automated)" + type: "manual" + remediation: | + Using Command Line: + First, update the cluster to enable Binary Authorization: + + gcloud container clusters update [CLUSTER_NAME] \ + --zone [COMPUTE-ZONE] \ + --enable-binauthz + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference + (https://cloud.google.com/binary-authorization/docs/policy-yaml-reference) for + guidance. + + Import the policy file into Binary Authorization: + + gcloud container binauthz policy import [YAML_POLICY] + scored: false + + - id: 5.10.6 + text: "Enable Cloud Security Command Center (Cloud SCC) (Manual)" + type: "manual" + remediation: | + Using Command Line: + Follow the instructions at + https://cloud.google.com/security-command-center/docs/quickstart-scc-setup. + scored: false
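The managed-services rules above, and the master/node/policies files that follow, all share one controls layout: a benchmark version, a node type, and nested groups of checks. A rough Go sketch of that shape, with field names inferred from the YAML in this diff rather than taken from the actual definitions in cmd/kvisor/kubebench/check:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Types inferred from the rule files; the authoritative structs live
// in the check package.
type Controls struct {
	Version string  `yaml:"version"`
	ID      string  `yaml:"id"`
	Text    string  `yaml:"text"`
	Type    string  `yaml:"type"`
	Groups  []Group `yaml:"groups"`
}

type Group struct {
	ID     string  `yaml:"id"`
	Text   string  `yaml:"text"`
	Checks []Check `yaml:"checks"`
}

type Check struct {
	ID          string `yaml:"id"`
	Text        string `yaml:"text"`
	Type        string `yaml:"type"` // "manual" checks are reported, not executed
	Audit       string `yaml:"audit"`
	Remediation string `yaml:"remediation"`
	Scored      bool   `yaml:"scored"`
}

func main() {
	doc := `
version: "gke-1.4.0"
id: "5"
text: "Managed Services"
type: "managedservices"
`
	var c Controls
	if err := yaml.Unmarshal([]byte(doc), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Version, c.Type)
}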
diff --git a/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/master.yaml b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/master.yaml new file mode 100644 index 00000000..10fbe76d --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "gke-1.4.0" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/node.yaml b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/node.yaml new file mode 100644 index 00000000..29ef8a5f --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/node.yaml @@ -0,0 +1,312 @@ +--- +controls: +version: "gke-1.4.0" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 644 $proxykubeconfig + scored: false + + - id: 3.1.2 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 /var/lib/kubelet/config.yaml + scored: false + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root /etc/kubernetes/kubelet.conf + scored: false + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.8 + text: "Ensure that the --event-qps argument is set to 5 or higher or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: gte + value: 5 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --event-qps=5 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set tlsCertFile to the location + of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.11 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true
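The 3.1.x file-permission checks above rely on the bitmask compare op: the audited mode passes when it sets no permission bit beyond the benchmark value, so 600 passes a 644 bitmask while 664 fails. A self-contained sketch of that semantic, assuming this is how the evaluator in the check package interprets bitmask:

package main

import (
	"fmt"
	"strconv"
)

// bitmaskOK reports whether the audited octal mode contains no
// permission bit that is absent from the allowed benchmark value.
func bitmaskOK(actual, allowed string) (bool, error) {
	a, err := strconv.ParseUint(actual, 8, 32)
	if err != nil {
		return false, err
	}
	b, err := strconv.ParseUint(allowed, 8, 32)
	if err != nil {
		return false, err
	}
	return a&^b == 0, nil
}

func main() {
	for _, mode := range []string{"600", "644", "664"} {
		ok, _ := bitmaskOK(mode, "644")
		fmt.Printf("permissions=%s against 644: %v\n", mode, ok) // true, true, false
	}
}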
diff --git a/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/policies.yaml b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/policies.yaml new file mode 100644 index 00000000..035345e0 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench-rules/gke-1.4.0/policies.yaml @@ -0,0 +1,230 @@ +--- +controls: +version: "gke-1.4.0" +id: 4 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: true + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.2 + text: "Pod Security Standards" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of privileged containers. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostPID containers. + scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostIPC containers. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of hostNetwork containers. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads + to restrict the admission of containers with .spec.allowPrivilegeEscalation set to true. + scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, + ensuring that either MustRunAsNonRoot or MustRunAs with the range of UIDs not including 0, is set. + scored: false + + - id: 4.2.7 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 4.2.8 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate consider adding + a policy which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 4.3 + text: "Network Policies and CNI" + checks: + - id: 4.3.1 + text: "Ensure that the CNI in use supports Network Policies (Manual)" + type: "manual" + remediation: | + To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin + will be updated. See Recommendation 5.6.7. + scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: + - id: 4.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + See also Recommendation 5.10.5 for GKE specifically. + scored: false + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.6.2 + text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" + type: "manual" + remediation: | + Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you + would need to enable alpha features in the apiserver by passing "--feature-gates=AllAlpha=true" argument. + Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS + parameter to "--feature-gates=AllAlpha=true" + KUBE_API_ARGS="--feature-gates=AllAlpha=true" + Based on your system, restart the kube-apiserver service. For example: + systemctl restart kube-apiserver.service + Use annotations to enable the docker/default seccomp profile in your pod definitions. An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + scored: false + + - id: 4.6.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.6.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false
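Check 4.6.2 above carries over the CIS text, which still describes the long-deprecated alpha seccomp annotation. On current clusters the same intent is expressed through the pod-level securityContext instead; a hedged sketch using the typed Kubernetes API (assuming the standard k8s.io/api module is available):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Modern replacement for the seccomp.security.alpha.kubernetes.io/pod
// annotation: request the runtime's default seccomp profile via the
// pod securityContext.
func main() {
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			SecurityContext: &corev1.PodSecurityContext{
				SeccompProfile: &corev1.SeccompProfile{
					Type: corev1.SeccompProfileTypeRuntimeDefault,
				},
			},
			Containers: []corev1.Container{
				{Name: "trustworthy-container", Image: "sotrustworthy:latest"},
			},
		},
	}
	fmt.Println(pod.Spec.SecurityContext.SeccompProfile.Type)
}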
diff --git a/cmd/kvisor/kubebench/kubebench.go b/cmd/kvisor/kubebench/kubebench.go new file mode 100644 index 00000000..15133595 --- /dev/null +++ b/cmd/kvisor/kubebench/kubebench.go @@ -0,0 +1,219 @@ +// Copyright © 2017 Aqua Security Software Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubebench + +import ( + goflag "flag" + "fmt" + "os" + + check2 "github.com/castai/kvisor/cmd/kvisor/kubebench/check" + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +type FilterOpts struct { + CheckList string + GroupList string + Scored bool + Unscored bool +} + +var ( + envVarsPrefix = "KUBE_BENCH" + defaultKubeVersion = "1.18" + kubeVersion string + detecetedKubeVersion string + benchmarkVersion string + cfgFile string + cfgDir = "./kubebench-rules/" + jsonFmt bool + junitFmt bool + masterFile = "master.yaml" + nodeFile = "node.yaml" + etcdFile = "etcd.yaml" + controlplaneFile = "controlplane.yaml" + policiesFile = "policies.yaml" + managedservicesFile = "managedservices.yaml" + exitCode int + noResults bool + noSummary bool + noRemediations bool + skipIds string + noTotals bool + filterOpts FilterOpts + includeTestOutput bool + outputFile string + configFileError error + controlsCollection []*check2.Controls ) + +func NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "kube-bench", + Short: "Run CIS Benchmark checks against a Kubernetes deployment", + Long: `This tool runs the CIS Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`, + Run: func(cmd *cobra.Command, args []string) { + bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper()) + if err != nil { + exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err)) + } + glog.V(1).Infof("Running checks for benchmark %v", bv) + + if isMaster() { + glog.V(1).Info("== Running master checks ==") + runChecks(check2.MASTER, loadConfig(check2.MASTER, bv), detecetedKubeVersion) + + // Control Plane is only valid for CIS 1.5 and later; + // this is a gatekeeper for previous versions + valid, err := validTargets(bv, []string{string(check2.CONTROLPLANE)}, viper.GetViper()) + if err != nil { + exitWithError(fmt.Errorf("error validating targets: %v", err)) + } + if valid { + glog.V(1).Info("== Running control plane checks ==") + runChecks(check2.CONTROLPLANE, loadConfig(check2.CONTROLPLANE, bv), detecetedKubeVersion) + } + } else { + glog.V(1).Info("== Skipping master checks ==") + } + + // Etcd is only valid for CIS 1.5 and later; + // this is a gatekeeper for previous versions. + valid, err := validTargets(bv, []string{string(check2.ETCD)}, viper.GetViper()) + if err != nil { + exitWithError(fmt.Errorf("error validating targets: %v", err)) + } + if valid && isEtcd() { + glog.V(1).Info("== Running etcd checks ==") + runChecks(check2.ETCD, loadConfig(check2.ETCD, bv), detecetedKubeVersion) + } else { + glog.V(1).Info("== Skipping etcd checks ==") + } + + glog.V(1).Info("== Running node checks ==") + runChecks(check2.NODE, loadConfig(check2.NODE, bv), detecetedKubeVersion) + + // Policies is only valid for CIS 1.5 and later; + // this is a gatekeeper for previous versions.
+ valid, err = validTargets(bv, []string{string(check2.POLICIES)}, viper.GetViper()) + if err != nil { + exitWithError(fmt.Errorf("error validating targets: %v", err)) + } + if valid { + glog.V(1).Info("== Running policies checks ==") + runChecks(check2.POLICIES, loadConfig(check2.POLICIES, bv), detecetedKubeVersion) + } else { + glog.V(1).Info("== Skipping policies checks ==") + } + + // Managedservices is only valid for GKE 1.0 and later; + // this is a gatekeeper for previous versions. + valid, err = validTargets(bv, []string{string(check2.MANAGEDSERVICES)}, viper.GetViper()) + if err != nil { + exitWithError(fmt.Errorf("error validating targets: %v", err)) + } + if valid { + glog.V(1).Info("== Running managed services checks ==") + runChecks(check2.MANAGEDSERVICES, loadConfig(check2.MANAGEDSERVICES, bv), detecetedKubeVersion) + } else { + glog.V(1).Info("== Skipping managed services checks ==") + } + + writeOutput(controlsCollection) + os.Exit(exitCodeSelection(controlsCollection)) + }, + } + + cobra.OnInitialize(initConfig) + + // Output control + cmd.PersistentFlags().IntVar(&exitCode, "exit-code", 0, "Specify the exit code for when checks fail") + cmd.PersistentFlags().BoolVar(&noResults, "noresults", false, "Disable printing of results section") + cmd.PersistentFlags().BoolVar(&noSummary, "nosummary", false, "Disable printing of summary section") + cmd.PersistentFlags().BoolVar(&noRemediations, "noremediations", false, "Disable printing of remediations section") + cmd.PersistentFlags().BoolVar(&noTotals, "nototals", false, "Disable printing of totals for failed, passed, ... checks across all sections") + cmd.PersistentFlags().BoolVar(&jsonFmt, "json", false, "Prints the results as JSON") + cmd.PersistentFlags().BoolVar(&junitFmt, "junit", false, "Prints the results as JUnit") + cmd.PersistentFlags().BoolVar(&filterOpts.Scored, "scored", true, "Run the scored CIS checks") + cmd.PersistentFlags().BoolVar(&filterOpts.Unscored, "unscored", true, "Run the unscored CIS checks") + cmd.PersistentFlags().StringVar(&skipIds, "skip", "", "List of comma separated values of checks to be skipped") + cmd.PersistentFlags().BoolVar(&includeTestOutput, "include-test-output", false, "Prints the actual result when test fails") + cmd.PersistentFlags().StringVar(&outputFile, "outputfile", "", "Writes the results to output file when run with --json or --junit") + + cmd.PersistentFlags().StringVarP( + &filterOpts.CheckList, + "check", + "c", + "", + `A comma-delimited list of checks to run as specified in CIS document. Example --check="1.1.1,1.1.2"`, + ) + cmd.PersistentFlags().StringVarP( + &filterOpts.GroupList, + "group", + "g", + "", + `Run all the checks under this comma-delimited list of groups. Example --group="1.1"`, + ) + cmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is ./cfg/config.yaml)") + cmd.PersistentFlags().StringVarP(&cfgDir, "config-dir", "D", cfgDir, "config directory") + cmd.PersistentFlags().StringVar(&kubeVersion, "version", "", "Manually specify Kubernetes version, automatically detected if unset")
+ cmd.PersistentFlags().StringVar(&benchmarkVersion, "benchmark", "", "Manually specify CIS benchmark version. It would be an error to specify both --version and --benchmark flags") + + if err := goflag.Set("logtostderr", "true"); err != nil { + fmt.Printf("unable to set logtostderr: %+v\n", err) + os.Exit(-1) + } + goflag.CommandLine.VisitAll(func(goflag *goflag.Flag) { + cmd.PersistentFlags().AddGoFlag(goflag) + }) + + return cmd +} + +// initConfig reads in config file and ENV variables if set. +func initConfig() { + if cfgFile != "" { // enable ability to specify config file via flag + viper.SetConfigFile(cfgFile) + } else { + viper.SetConfigName("config") // name of config file (without extension) + viper.AddConfigPath(cfgDir) // adding ./cfg as first search path + } + + // Read flag values from environment variables. + // Precedence: Command line flags take precedence over environment variables. + viper.SetEnvPrefix(envVarsPrefix) + viper.AutomaticEnv() + + if kubeVersion == "" { + if env := viper.Get("version"); env != nil { + kubeVersion = env.(string) + } + } + + // If a config file is found, read it in. + if err := viper.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + // Config file not found; ignore error for now to prevent commands + // which don't need the config file exiting. + configFileError = err + } else { + // Config file was found but another error was produced + colorPrint(check2.FAIL, fmt.Sprintf("Failed to read config file: %v\n", err)) + os.Exit(1) + } + } +} diff --git a/cmd/kvisor/kubebench/kubernetes_version.go b/cmd/kvisor/kubebench/kubernetes_version.go new file mode 100644 index 00000000..57206dd2 --- /dev/null +++ b/cmd/kvisor/kubebench/kubernetes_version.go @@ -0,0 +1,183 @@ +package kubebench + +import ( + "crypto/tls" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "time" + + "github.com/golang/glog" +) + +type KubeVersion struct { + Major string + Minor string + baseVersion string + GitVersion string +} + +func (k *KubeVersion) BaseVersion() string { + if k.baseVersion != "" { + return k.baseVersion + } + // Some providers return the minor version like "15+" + minor := strings.Replace(k.Minor, "+", "", -1) + ver := fmt.Sprintf("%s.%s", k.Major, minor) + k.baseVersion = ver + return ver +} + +func getKubeVersionFromRESTAPI() (*KubeVersion, error) { + glog.V(2).Info("Try to get version from REST API") + k8sVersionURL := getKubernetesURL() + serviceaccount := "/var/run/secrets/kubernetes.io/serviceaccount" + cacertfile := fmt.Sprintf("%s/ca.crt", serviceaccount) + tokenfile := fmt.Sprintf("%s/token", serviceaccount) + + tlsCert, err := loadCertificate(cacertfile) + if err != nil { + glog.V(2).Infof("Failed loading certificate Error: %s", err) + return nil, err + } + + tb, err := ioutil.ReadFile(tokenfile) + if err != nil { + glog.V(2).Infof("Failed reading token file Error: %s", err) + return nil, err + } + token := strings.TrimSpace(string(tb)) + + data, err := getWebDataWithRetry(k8sVersionURL, token, tlsCert) + if err != nil { + glog.V(2).Infof("Failed to get data Error: %s", err) + return nil, err + } + + k8sVersion, err := extractVersion(data) + if err != nil { + return nil, err + } + return k8sVersion, nil +}
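As a short in-package illustration of the helpers above (mirroring the fixtures in the test file added below), extractVersion plus KubeVersion.BaseVersion reduce the apiserver's /version payload to the "major.minor" string used for benchmark mapping. A sketch, assuming it lives inside package kubebench:

func ExampleKubeVersion_BaseVersion() {
	data := []byte(`{"major": "1", "minor": "15+", "gitVersion": "v1.15.3"}`)
	kv, err := extractVersion(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(kv.BaseVersion()) // the "+" suffix is stripped
	// Output: 1.15
}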
+ +// getWebDataWithRetry calls getWebData up to 10 times, sleeping one second +// between attempts, to give cluster DNS time to come up in the container +// where kube-bench runs. It returns as soon as one attempt succeeds. +func getWebDataWithRetry(k8sVersionURL, token string, cacert *tls.Certificate) (data []byte, err error) { + tries := 0 + // We retry a few times in case the DNS service has not had time to come up + for tries < 10 { + data, err = getWebData(k8sVersionURL, token, cacert) + if err == nil { + return + } + tries++ + time.Sleep(1 * time.Second) + } + + return +} + +type VersionResponse struct { + Major string + Minor string + GitVersion string + GitCommit string + GitTreeState string + BuildDate string + GoVersion string + Compiler string + Platform string +} + +func extractVersion(data []byte) (*KubeVersion, error) { + vrObj := &VersionResponse{} + glog.V(2).Info(fmt.Sprintf("vd: %s\n", string(data))) + err := json.Unmarshal(data, vrObj) + if err != nil { + return nil, err + } + glog.V(2).Info(fmt.Sprintf("vrObj: %#v\n", vrObj)) + + return &KubeVersion{ + Major: vrObj.Major, + Minor: vrObj.Minor, + GitVersion: vrObj.GitVersion, + }, nil +} + +func getWebData(srvURL, token string, cacert *tls.Certificate) ([]byte, error) { + glog.V(2).Info(fmt.Sprintf("getWebData srvURL: %s\n", srvURL)) + + tlsConf := &tls.Config{ + Certificates: []tls.Certificate{*cacert}, + InsecureSkipVerify: true, + } + tr := &http.Transport{ + TLSClientConfig: tlsConf, + } + client := &http.Client{Transport: tr} + req, err := http.NewRequest(http.MethodGet, srvURL, nil) + if err != nil { + return nil, err + } + + authToken := fmt.Sprintf("Bearer %s", token) + req.Header.Set("Authorization", authToken) + + resp, err := client.Do(req) + if err != nil { + glog.V(2).Info(fmt.Sprintf("HTTP ERROR: %v\n", err)) + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + glog.V(2).Info(fmt.Sprintf("URL:[%s], StatusCode:[%d] \n Headers: %#v\n", srvURL, resp.StatusCode, resp.Header)) + err = fmt.Errorf("URL:[%s], StatusCode:[%d]", srvURL, resp.StatusCode) + return nil, err + } + + return ioutil.ReadAll(resp.Body) +} + +func loadCertificate(certFile string) (*tls.Certificate, error) { + cacert, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + + var tlsCert tls.Certificate + block, _ := pem.Decode(cacert) + if block == nil { + return nil, fmt.Errorf("unable to Decode certificate") + } + + glog.V(2).Info("Loading CA certificate") + tlsCert.Certificate = append(tlsCert.Certificate, block.Bytes) + return &tlsCert, nil +} + +func getKubernetesURL() string { + k8sVersionURL := "https://kubernetes.default.svc/version" + + // The following provides flexibility to use the + // K8s-provided environment variables in situations where + // hostNetwork: true is set + if !isEmpty(os.Getenv("KUBE_BENCH_K8S_ENV")) { + k8sHost := os.Getenv("KUBERNETES_SERVICE_HOST") + k8sPort := os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS") + if !isEmpty(k8sHost) && !isEmpty(k8sPort) { + return fmt.Sprintf("https://%s:%s/version", k8sHost, k8sPort) + } + + glog.V(2).Info("KUBE_BENCH_K8S_ENV is set, but environment variables KUBERNETES_SERVICE_HOST or KUBERNETES_SERVICE_PORT_HTTPS are not set") + } + + return k8sVersionURL +} diff --git a/cmd/kvisor/kubebench/kubernetes_version_test.go b/cmd/kvisor/kubebench/kubernetes_version_test.go new file mode 100644 index 00000000..f833f782 --- /dev/null +++ b/cmd/kvisor/kubebench/kubernetes_version_test.go @@ -0,0 +1,272 @@ +package kubebench + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strconv" + "testing" +) + +func TestLoadCertificate(t *testing.T) { + tmp, err := ioutil.TempDir("", "TestFakeLoadCertificate") + if err != nil { + t.Fatalf("unable
to create temp directory: %v", err) + } + defer os.RemoveAll(tmp) + + goodCertFile, _ := ioutil.TempFile(tmp, "good-cert-*") + _, _ = goodCertFile.Write([]byte(`-----BEGIN CERTIFICATE----- +MIICyDCCAbCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl +cm5ldGVzMB4XDTE5MTEwODAxNDAwMFoXDTI5MTEwNTAxNDAwMFowFTETMBEGA1UE +AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMn6 +wjvhMc9e0MDwpQNhp8SPxmv1DsYJ4Btp1GeScIgKKDwppuoOmVizLiMNdV5+70yI +MgNfm/gwFRNDOtN3R7msfZDD5Dd1vI6qRTP21DFOGVdysFdwqJTs0nGcmfvZEOtw +9cjcsXrBi2Mg54v+X/pq2w51xajCGBt2+bpxJJ3WBiWqKYv0RQdNL0WZGm+V9BuP +pHRWPBeLxuCzt5K3Gx+1QDy8o6Y4sSRPssWC4RhD9Hs5/9eeGRyZslLs+AuqdDLQ +aziiSjHVtgCfRXE9nYVxaDIwTFuh+Q1IvtB36NRLyX47oya+BbX3PoCtSjA36RBb +tcJfulr3oNHnb2ZlfcUCAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAeQDkbM6DilLkIVQDyxauETgJDV +2AaVzYaAgDApQGAoYV6WIY7Exk4TlmLeKQjWt2s/GtthQWuzUDKTcEvWcG6gNdXk +gzuCRRDMGu25NtG3m67w4e2RzW8Z/lzvbfyJZGoV2c6dN+yP9/Pw2MXlrnMWugd1 +jLv3UYZRHMpuNS8BJU74BuVzVPHd55RAl+bV8yemdZJ7pPzMvGbZ7zRXWODTDlge +CQb9lY+jYErisH8Sq7uABFPvi7RaTh8SS7V7OxqHZvmttNTdZs4TIkk45JK7Y+Xq +FAjB57z2NcIgJuVpQnGRYtr/JcH2Qdsq8bLtXaojUIWOOqoTDRLYozdMOOQ= +-----END CERTIFICATE-----`)) + badCertFile, _ := ioutil.TempFile(tmp, "bad-cert-*") + + cases := []struct { + file string + fail bool + }{ + { + file: "missing cert file", + fail: true, + }, + { + file: badCertFile.Name(), + fail: true, + }, + { + file: goodCertFile.Name(), + fail: false, + }, + } + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + tlsCert, err := loadCertificate(c.file) + if !c.fail { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if tlsCert == nil { + t.Errorf("missing returned TLS Certificate") + } + } else { + if err == nil { + t.Errorf("Expected error") + } + } + }) + } +} + +func TestGetWebData(t *testing.T) { + okfn := func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintln(w, `{ + "major": "1", + "minor": "15"}`) + } + errfn := func(w http.ResponseWriter, r *http.Request) { + http.Error(w, http.StatusText(http.StatusInternalServerError), + http.StatusInternalServerError) + } + token := "dummyToken" + var tlsCert tls.Certificate + + cases := []struct { + fn http.HandlerFunc + fail bool + }{ + { + fn: okfn, + fail: false, + }, + { + fn: errfn, + fail: true, + }, + } + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + ts := httptest.NewServer(c.fn) + defer ts.Close() + data, err := getWebData(ts.URL, token, &tlsCert) + if !c.fail { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if len(data) == 0 { + t.Errorf("missing data") + } + } else { + if err == nil { + t.Errorf("Expected error") + } + } + }) + } +} + +func TestGetWebDataWithRetry(t *testing.T) { + okfn := func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintln(w, `{ + "major": "1", + "minor": "15"}`) + } + errfn := func(w http.ResponseWriter, r *http.Request) { + http.Error(w, http.StatusText(http.StatusInternalServerError), + http.StatusInternalServerError) + } + token := "dummyToken" + var tlsCert tls.Certificate + + cases := []struct { + fn http.HandlerFunc + fail bool + }{ + { + fn: okfn, + fail: false, + }, + { + fn: errfn, + fail: true, + }, + } + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + ts := httptest.NewServer(c.fn) + defer ts.Close() + data, err := getWebDataWithRetry(ts.URL, token, &tlsCert) + if !c.fail { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if len(data) == 0 { + 
t.Errorf("missing data") + } + } else { + if err == nil { + t.Errorf("Expected error") + } + } + }) + } +} + +func TestExtractVersion(t *testing.T) { + okJSON := []byte(`{ + "major": "1", + "minor": "15", + "gitVersion": "v1.15.3", + "gitCommit": "2d3c76f9091b6bec110a5e63777c332469e0cba2", + "gitTreeState": "clean", + "buildDate": "2019-08-20T18:57:36Z", + "goVersion": "go1.12.9", + "compiler": "gc", + "platform": "linux/amd64" + }`) + + invalidJSON := []byte(`{ + "major": "1", + "minor": "15", + "gitVersion": "v1.15.3", + "gitCommit": "2d3c76f9091b6bec110a5e63777c332469e0cba2", + "gitTreeState": "clean",`) + + cases := []struct { + data []byte + fail bool + expectedVer string + }{ + { + data: okJSON, + fail: false, + expectedVer: "1.15", + }, + { + data: invalidJSON, + fail: true, + }, + } + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + ver, err := extractVersion(c.data) + if !c.fail { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if c.expectedVer != ver.BaseVersion() { + t.Errorf("Expected %q but Got %q", c.expectedVer, ver) + } + } else { + if err == nil { + t.Errorf("Expected error") + } + } + }) + } +} + +func TestGetKubernetesURL(t *testing.T) { + resetEnvs := func() { + os.Unsetenv("KUBE_BENCH_K8S_ENV") + os.Unsetenv("KUBERNETES_SERVICE_HOST") + os.Unsetenv("KUBERNETES_SERVICE_PORT_HTTPS") + } + + setEnvs := func() { + os.Setenv("KUBE_BENCH_K8S_ENV", "1") + os.Setenv("KUBERNETES_SERVICE_HOST", "testHostServer") + os.Setenv("KUBERNETES_SERVICE_PORT_HTTPS", "443") + } + + cases := []struct { + useDefault bool + expected string + }{ + { + useDefault: true, + expected: "https://kubernetes.default.svc/version", + }, + { + useDefault: false, + expected: "https://testHostServer:443/version", + }, + } + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + resetEnvs() + defer resetEnvs() + if !c.useDefault { + setEnvs() + } + k8sURL := getKubernetesURL() + + if k8sURL != c.expected { + t.Errorf("Expected %q but Got %q", k8sURL, c.expected) + } + }) + } +} diff --git a/cmd/kvisor/kubebench/run.go b/cmd/kvisor/kubebench/run.go new file mode 100644 index 00000000..7b462844 --- /dev/null +++ b/cmd/kvisor/kubebench/run.go @@ -0,0 +1,110 @@ +package kubebench + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/castai/kvisor/cmd/kvisor/kubebench/check" + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func NewRunCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "run", + Short: "Run kube-bench tests", + Run: func(cmd *cobra.Command, args []string) { + targets, err := cmd.Flags().GetStringSlice("targets") + if err != nil { + exitWithError(fmt.Errorf("unable to get `targets` from command line :%v", err)) + } + + bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper()) + if err != nil { + exitWithError(fmt.Errorf("unable to get benchmark version. 
error: %v", err)) + } + + glog.V(2).Infof("Checking targets %v for %v", targets, bv) + benchmarkVersionToTargetsMap, err := loadTargetMapping(viper.GetViper()) + if err != nil { + exitWithError(fmt.Errorf("error loading targets: %v", err)) + } + valid, err := validTargets(bv, targets, viper.GetViper()) + if err != nil { + exitWithError(fmt.Errorf("error validating targets: %v", err)) + } + if len(targets) > 0 && !valid { + exitWithError(fmt.Errorf(fmt.Sprintf(`The specified --targets "%s" are not configured for the CIS Benchmark %s\n Valid targets %v`, strings.Join(targets, ","), bv, benchmarkVersionToTargetsMap[bv]))) + } + + // Merge version-specific config if any. + path := filepath.Join(cfgDir, bv) + err = mergeConfig(path) + if err != nil { + exitWithError(fmt.Errorf("Error in mergeConfig: %v\n", err)) + } + + err = run(targets, bv) + if err != nil { + exitWithError(fmt.Errorf("Error in run: %v\n", err)) + } + + os.Exit(exitCodeSelection(controlsCollection)) + }, + } + cmd.Flags().StringSliceP("targets", "s", []string{}, + `Specify targets of the benchmark to run. These names need to match the filenames in the cfg/ directory. + For example, to run the tests specified in master.yaml and etcd.yaml, specify --targets=master,etcd + If no targets are specified, run tests from all files in the cfg/ directory. + `) + + return cmd +} + +func run(targets []string, benchmarkVersion string) (err error) { + yamlFiles, err := getTestYamlFiles(targets, benchmarkVersion) + if err != nil { + return err + } + + glog.V(3).Infof("Running tests from files %v\n", yamlFiles) + + for _, yamlFile := range yamlFiles { + _, name := filepath.Split(yamlFile) + testType := check.NodeType(strings.Split(name, ".")[0]) + runChecks(testType, yamlFile, detecetedKubeVersion) + } + + writeOutput(controlsCollection) + return nil +} + +func getTestYamlFiles(targets []string, benchmarkVersion string) (yamlFiles []string, err error) { + // Check that the specified targets have corresponding YAML files in the config directory + configFileDirectory := filepath.Join(cfgDir, benchmarkVersion) + for _, target := range targets { + filename := translate(target) + ".yaml" + file := filepath.Join(configFileDirectory, filename) + if _, err := os.Stat(file); err != nil { + return nil, fmt.Errorf("file %s not found for version %s", filename, benchmarkVersion) + } + yamlFiles = append(yamlFiles, file) + } + + // If no targets were specified, we will run tests from all the files in the directory + if len(yamlFiles) == 0 { + yamlFiles, err = getYamlFilesFromDir(configFileDirectory) + if err != nil { + return nil, err + } + } + + return yamlFiles, err +} + +func translate(target string) string { + return strings.Replace(strings.ToLower(target), "worker", "node", -1) +} diff --git a/cmd/kvisor/kubebench/run_test.go b/cmd/kvisor/kubebench/run_test.go new file mode 100644 index 00000000..d275cbee --- /dev/null +++ b/cmd/kvisor/kubebench/run_test.go @@ -0,0 +1,122 @@ +package kubebench + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetTestYamlFiles(t *testing.T) { + cases := []struct { + name string + targets []string + benchmark string + succeed bool + expCount int + }{ + { + name: "Specify two targets", + targets: []string{"one", "two"}, + benchmark: "benchmark", + succeed: true, + expCount: 2, + }, + { + name: "Specify a target that doesn't exist", + targets: []string{"one", "missing"}, + benchmark: "benchmark", + succeed: false, + }, + { + name: "No targets specified - should return everything except 
config.yaml", + targets: []string{}, + benchmark: "benchmark", + succeed: true, + expCount: 3, + }, + { + name: "Specify benchmark that doesn't exist", + targets: []string{"one"}, + benchmark: "missing", + succeed: false, + }, + } + + // Set up temp config directory + var err error + cfgDir, err = ioutil.TempDir("", "kube-bench-test") + if err != nil { + t.Fatalf("Failed to create temp directory") + } + defer os.RemoveAll(cfgDir) + + d := filepath.Join(cfgDir, "benchmark") + err = os.Mkdir(d, 0766) + if err != nil { + t.Fatalf("Failed to create temp dir") + } + + // We never expect config.yaml to be returned + for _, filename := range []string{"one.yaml", "two.yaml", "three.yaml", "config.yaml"} { + err = ioutil.WriteFile(filepath.Join(d, filename), []byte("hello world"), 0666) + if err != nil { + t.Fatalf("error writing temp file %s: %v", filename, err) + } + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + yamlFiles, err := getTestYamlFiles(c.targets, c.benchmark) + if err != nil && c.succeed { + t.Fatalf("Error %v", err) + } + + if err == nil && !c.succeed { + t.Fatalf("Expected failure") + } + + if len(yamlFiles) != c.expCount { + t.Fatalf("Expected %d, got %d", c.expCount, len(yamlFiles)) + } + }) + } +} + +func TestTranslate(t *testing.T) { + cases := []struct { + name string + original string + expected string + }{ + { + name: "keep", + original: "controlplane", + expected: "controlplane", + }, + { + name: "translate", + original: "worker", + expected: "node", + }, + { + name: "translateLower", + original: "Worker", + expected: "node", + }, + { + name: "Lower", + original: "ETCD", + expected: "etcd", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ret := translate(c.original) + if ret != c.expected { + t.Fatalf("Expected %q, got %q", c.expected, ret) + } + }) + } +} diff --git a/cmd/kvisor/kubebench/testdata/controlsCollection.json b/cmd/kvisor/kubebench/testdata/controlsCollection.json new file mode 100644 index 00000000..db71728c --- /dev/null +++ b/cmd/kvisor/kubebench/testdata/controlsCollection.json @@ -0,0 +1,114 @@ +[ + { + "id": "2", + "version": "1.15", + "text": "Etcd Node Configuration", + "node_type": "etcd", + "tests": [ + { + "section": "2", + "pass": 7, + "fail": 0, + "warn": 0, + "info": 0, + "desc": "Etcd Node Configuration Files", + "results": [ + { + "test_number": "2.1", + "test_desc": "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)", + "audit": "/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep", + "AuditConfig": "", + "type": "", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\nThen, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml\non the master node and set the below parameters.\n--cert-file=\n--key-file=\n", + "test_info": [ + "Follow the etcd service documentation and configure TLS encryption.\nThen, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml\non the master node and set the below parameters.\n--cert-file=\n--key-file=\n" + ], + "status": "PASS", + "actual_value": "root 3277 3218 3 Apr19 ? 
03:57:52 etcd --advertise-client-urls=https://192.168.64.4:2379 --cert-file=/var/lib/minikube/certs/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib/minikube/etcd --initial-advertise-peer-urls=https://192.168.64.4:2380 --initial-cluster=minikube=https://192.168.64.4:2380 --key-file=/var/lib/minikube/certs/etcd/server.key --listen-client-urls=https://127.0.0.1:2379,https://192.168.64.4:2379 --listen-metrics-urls=http://127.0.0.1:2381 --listen-peer-urls=https://192.168.64.4:2380 --name=minikube --peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt --peer-client-cert-auth=true --peer-key-file=/var/lib/minikube/certs/etcd/peer.key --peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt --snapshot-count=10000 --trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt\nroot 4624 4605 8 Apr21 ? 04:55:10 kube-apiserver --advertise-address=192.168.64.4 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/var/lib/minikube/certs/ca.crt --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodSecurityPolicy --enable-bootstrap-token-auth=true --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=8443 --service-account-key-file=/var/lib/minikube/certs/sa.pub --service-cluster-ip-range=10.96.0.0/12 --tls-cert-file=/var/lib/minikube/certs/apiserver.crt --tls-private-key-file=/var/lib/minikube/certs/apiserver.key\n", + "scored": true, + "expected_result": "'--cert-file' is present AND '--key-file' is present" + } + ] + } + ], + "total_pass": 7, + "total_fail": 0, + "total_warn": 0, + "total_info": 0 + }, + { + "id": "3", + "version": "1.5", + "text": "Control Plane Configuration", + "node_type": "controlplane", + "tests": [ + { + "section": "3.1", + "pass": 0, + "fail": 0, + "warn": 1, + "info": 0, + "desc": "Authentication and Authorization", + "results": [ + { + "test_number": "3.1.1", + "test_desc": "Client certificate authentication should not be used for users (Not Scored)", + "audit": "", + "AuditConfig": "", + "type": "manual", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be\nimplemented in place of client certificates.\n", + "test_info": [ + "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be\nimplemented in place of client certificates.\n" + ], + "status": "WARN", + "actual_value": "", + "scored": false, + "expected_result": "", + "reason": "Test marked as a manual test" + } + ] + } + ], + "total_pass": 0, + "total_fail": 0, + "total_warn": 3, + "total_info": 0 + }, + { + "id": "1", + "version": "1.5", + "text": "Master Node Security 
Configuration", + "node_type": "master", + "tests": [ + { + "section": "1.1", + "pass": 15, + "fail": 1, + "warn": 5, + "info": 0, + "desc": "Master Node Configuration Files", + "results": [ + { + "test_number": "1.1.1", + "test_desc": "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)", + "audit": "/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-apiserver.yaml; fi'", + "AuditConfig": "", + "type": "", + "remediation": "Run the below command (based on the file location on your system) on the\nmaster node.\nFor example, chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml\n", + "test_info": [ + "Run the below command (based on the file location on your system) on the\nmaster node.\nFor example, chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml\n" + ], + "status": "PASS", + "actual_value": "permissions=600\n", + "scored": true, + "expected_result": "bitmask '600' AND '644'" + } + ] + } + ], + "total_pass": 42, + "total_fail": 12, + "total_warn": 11, + "total_info": 0 + } +] \ No newline at end of file diff --git a/cmd/kvisor/kubebench/testdata/passedControlsCollection.json b/cmd/kvisor/kubebench/testdata/passedControlsCollection.json new file mode 100644 index 00000000..5235dbb5 --- /dev/null +++ b/cmd/kvisor/kubebench/testdata/passedControlsCollection.json @@ -0,0 +1,77 @@ +[ + { + "id": "2", + "version": "1.15", + "text": "Etcd Node Configuration", + "node_type": "etcd", + "tests": [ + { + "section": "2", + "pass": 7, + "fail": 0, + "warn": 0, + "info": 0, + "desc": "Etcd Node Configuration Files", + "results": [ + { + "test_number": "2.1", + "test_desc": "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)", + "audit": "/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep", + "AuditConfig": "", + "type": "", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\nThen, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml\non the master node and set the below parameters.\n--cert-file=\n--key-file=\n", + "test_info": [ + "Follow the etcd service documentation and configure TLS encryption.\nThen, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml\non the master node and set the below parameters.\n--cert-file=\n--key-file=\n" + ], + "status": "PASS", + "actual_value": "root 3277 3218 3 Apr19 ? 03:57:52 etcd --advertise-client-urls=https://192.168.64.4:2379 --cert-file=/var/lib/minikube/certs/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib/minikube/etcd --initial-advertise-peer-urls=https://192.168.64.4:2380 --initial-cluster=minikube=https://192.168.64.4:2380 --key-file=/var/lib/minikube/certs/etcd/server.key --listen-client-urls=https://127.0.0.1:2379,https://192.168.64.4:2379 --listen-metrics-urls=http://127.0.0.1:2381 --listen-peer-urls=https://192.168.64.4:2380 --name=minikube --peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt --peer-client-cert-auth=true --peer-key-file=/var/lib/minikube/certs/etcd/peer.key --peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt --snapshot-count=10000 --trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt\nroot 4624 4605 8 Apr21 ? 
04:55:10 kube-apiserver --advertise-address=192.168.64.4 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/var/lib/minikube/certs/ca.crt --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodSecurityPolicy --enable-bootstrap-token-auth=true --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=8443 --service-account-key-file=/var/lib/minikube/certs/sa.pub --service-cluster-ip-range=10.96.0.0/12 --tls-cert-file=/var/lib/minikube/certs/apiserver.crt --tls-private-key-file=/var/lib/minikube/certs/apiserver.key\n", + "scored": true, + "expected_result": "'--cert-file' is present AND '--key-file' is present" + } + ] + } + ], + "total_pass": 7, + "total_fail": 0, + "total_warn": 0, + "total_info": 0 + }, + { + "id": "3", + "version": "1.5", + "text": "Control Plane Configuration", + "node_type": "controlplane", + "tests": [ + { + "section": "3.1", + "pass": 0, + "fail": 0, + "warn": 1, + "info": 0, + "desc": "Authentication and Authorization", + "results": [ + { + "test_number": "3.1.1", + "test_desc": "Client certificate authentication should not be used for users (Not Scored)", + "audit": "", + "AuditConfig": "", + "type": "manual", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be\nimplemented in place of client certificates.\n", + "test_info": [ + "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be\nimplemented in place of client certificates.\n" + ], + "status": "WARN", + "actual_value": "", + "scored": false, + "expected_result": "", + "reason": "Test marked as a manual test" + } + ] + } + ], + "total_pass": 0, + "total_fail": 0, + "total_warn": 3, + "total_info": 0 + } +] diff --git a/cmd/kvisor/kubebench/testdata/result.json b/cmd/kvisor/kubebench/testdata/result.json new file mode 100644 index 00000000..871483da --- /dev/null +++ b/cmd/kvisor/kubebench/testdata/result.json @@ -0,0 +1,122 @@ +{ + "Controls": [ + { + "id": "1", + "version": "1.5", + "text": "Master Node Security Configuration", + "node_type": "master", + "tests": [ + { + "section": "1.1", + "pass": 15, + "fail": 1, + "warn": 5, + "info": 0, + "desc": "Master Node Configuration Files", + "results": [ + { + "test_number": "1.1.1", + "test_desc": "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)", + "audit": "/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-apiserver.yaml; fi'", + "AuditConfig": "", + "type": "", + "remediation": 
"Run the below command (based on the file location on your system) on the\nmaster node.\nFor example, chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml\n", + "test_info": [ + "Run the below command (based on the file location on your system) on the\nmaster node.\nFor example, chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml\n" + ], + "status": "PASS", + "actual_value": "permissions=600\n", + "scored": true, + "expected_result": "bitmask '600' AND '644'" + } + ] + } + ], + "total_pass": 42, + "total_fail": 12, + "total_warn": 11, + "total_info": 0 + }, + { + "id": "2", + "version": "1.15", + "text": "Etcd Node Configuration", + "node_type": "etcd", + "tests": [ + { + "section": "2", + "pass": 7, + "fail": 0, + "warn": 0, + "info": 0, + "desc": "Etcd Node Configuration Files", + "results": [ + { + "test_number": "2.1", + "test_desc": "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)", + "audit": "/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep", + "AuditConfig": "", + "type": "", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\nThen, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml\non the master node and set the below parameters.\n--cert-file=\n--key-file=\n", + "test_info": [ + "Follow the etcd service documentation and configure TLS encryption.\nThen, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml\non the master node and set the below parameters.\n--cert-file=\n--key-file=\n" + ], + "status": "PASS", + "actual_value": "root 3277 3218 3 Apr19 ? 03:57:52 etcd --advertise-client-urls=https://192.168.64.4:2379 --cert-file=/var/lib/minikube/certs/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib/minikube/etcd --initial-advertise-peer-urls=https://192.168.64.4:2380 --initial-cluster=minikube=https://192.168.64.4:2380 --key-file=/var/lib/minikube/certs/etcd/server.key --listen-client-urls=https://127.0.0.1:2379,https://192.168.64.4:2379 --listen-metrics-urls=http://127.0.0.1:2381 --listen-peer-urls=https://192.168.64.4:2380 --name=minikube --peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt --peer-client-cert-auth=true --peer-key-file=/var/lib/minikube/certs/etcd/peer.key --peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt --snapshot-count=10000 --trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt\nroot 4624 4605 8 Apr21 ? 
04:55:10 kube-apiserver --advertise-address=192.168.64.4 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/var/lib/minikube/certs/ca.crt --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodSecurityPolicy --enable-bootstrap-token-auth=true --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=8443 --service-account-key-file=/var/lib/minikube/certs/sa.pub --service-cluster-ip-range=10.96.0.0/12 --tls-cert-file=/var/lib/minikube/certs/apiserver.crt --tls-private-key-file=/var/lib/minikube/certs/apiserver.key\n", + "scored": true, + "expected_result": "'--cert-file' is present AND '--key-file' is present" + } + ] + } + ], + "total_pass": 7, + "total_fail": 0, + "total_warn": 0, + "total_info": 0 + }, + { + "id": "3", + "version": "1.5", + "text": "Control Plane Configuration", + "node_type": "controlplane", + "tests": [ + { + "section": "3.1", + "pass": 0, + "fail": 0, + "warn": 1, + "info": 0, + "desc": "Authentication and Authorization", + "results": [ + { + "test_number": "3.1.1", + "test_desc": "Client certificate authentication should not be used for users (Not Scored)", + "audit": "", + "AuditConfig": "", + "type": "manual", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be\nimplemented in place of client certificates.\n", + "test_info": [ + "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be\nimplemented in place of client certificates.\n" + ], + "status": "WARN", + "actual_value": "", + "scored": false, + "expected_result": "", + "reason": "Test marked as a manual test" + } + ] + } + ], + "total_pass": 0, + "total_fail": 0, + "total_warn": 3, + "total_info": 0 + } + ], + "Totals": { + "total_pass": 49, + "total_fail": 12, + "total_warn": 14, + "total_info": 0 + } +} diff --git a/cmd/kvisor/kubebench/testdata/result_no_totals.json b/cmd/kvisor/kubebench/testdata/result_no_totals.json new file mode 100644 index 00000000..6589a507 --- /dev/null +++ b/cmd/kvisor/kubebench/testdata/result_no_totals.json @@ -0,0 +1,114 @@ +[ + { + "id": "1", + "version": "1.5", + "text": "Master Node Security Configuration", + "node_type": "master", + "tests": [ + { + "section": "1.1", + "pass": 15, + "fail": 1, + "warn": 5, + "info": 0, + "desc": "Master Node Configuration Files", + "results": [ + { + "test_number": "1.1.1", + "test_desc": "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored)", + "audit": "/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c 
permissions=%a /etc/kubernetes/manifests/kube-apiserver.yaml; fi'", + "AuditConfig": "", + "type": "", + "remediation": "Run the below command (based on the file location on your system) on the\nmaster node.\nFor example, chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml\n", + "test_info": [ + "Run the below command (based on the file location on your system) on the\nmaster node.\nFor example, chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml\n" + ], + "status": "PASS", + "actual_value": "permissions=600\n", + "scored": true, + "expected_result": "bitmask '600' AND '644'" + } + ] + } + ], + "total_pass": 42, + "total_fail": 12, + "total_warn": 11, + "total_info": 0 + }, + { + "id": "2", + "version": "1.15", + "text": "Etcd Node Configuration", + "node_type": "etcd", + "tests": [ + { + "section": "2", + "pass": 7, + "fail": 0, + "warn": 0, + "info": 0, + "desc": "Etcd Node Configuration Files", + "results": [ + { + "test_number": "2.1", + "test_desc": "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)", + "audit": "/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep", + "AuditConfig": "", + "type": "", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\nThen, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml\non the master node and set the below parameters.\n--cert-file=\n--key-file=\n", + "test_info": [ + "Follow the etcd service documentation and configure TLS encryption.\nThen, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml\non the master node and set the below parameters.\n--cert-file=\n--key-file=\n" + ], + "status": "PASS", + "actual_value": "root 3277 3218 3 Apr19 ? 03:57:52 etcd --advertise-client-urls=https://192.168.64.4:2379 --cert-file=/var/lib/minikube/certs/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib/minikube/etcd --initial-advertise-peer-urls=https://192.168.64.4:2380 --initial-cluster=minikube=https://192.168.64.4:2380 --key-file=/var/lib/minikube/certs/etcd/server.key --listen-client-urls=https://127.0.0.1:2379,https://192.168.64.4:2379 --listen-metrics-urls=http://127.0.0.1:2381 --listen-peer-urls=https://192.168.64.4:2380 --name=minikube --peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt --peer-client-cert-auth=true --peer-key-file=/var/lib/minikube/certs/etcd/peer.key --peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt --snapshot-count=10000 --trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt\nroot 4624 4605 8 Apr21 ? 
04:55:10 kube-apiserver --advertise-address=192.168.64.4 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=/var/lib/minikube/certs/ca.crt --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodSecurityPolicy --enable-bootstrap-token-auth=true --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=8443 --service-account-key-file=/var/lib/minikube/certs/sa.pub --service-cluster-ip-range=10.96.0.0/12 --tls-cert-file=/var/lib/minikube/certs/apiserver.crt --tls-private-key-file=/var/lib/minikube/certs/apiserver.key\n", + "scored": true, + "expected_result": "'--cert-file' is present AND '--key-file' is present" + } + ] + } + ], + "total_pass": 7, + "total_fail": 0, + "total_warn": 0, + "total_info": 0 + }, + { + "id": "3", + "version": "1.5", + "text": "Control Plane Configuration", + "node_type": "controlplane", + "tests": [ + { + "section": "3.1", + "pass": 0, + "fail": 0, + "warn": 1, + "info": 0, + "desc": "Authentication and Authorization", + "results": [ + { + "test_number": "3.1.1", + "test_desc": "Client certificate authentication should not be used for users (Not Scored)", + "audit": "", + "AuditConfig": "", + "type": "manual", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be\nimplemented in place of client certificates.\n", + "test_info": [ + "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be\nimplemented in place of client certificates.\n" + ], + "status": "WARN", + "actual_value": "", + "scored": false, + "expected_result": "", + "reason": "Test marked as a manual test" + } + ] + } + ], + "total_pass": 0, + "total_fail": 0, + "total_warn": 3, + "total_info": 0 + } +] \ No newline at end of file diff --git a/cmd/kvisor/kubebench/util.go b/cmd/kvisor/kubebench/util.go new file mode 100644 index 00000000..7cce1433 --- /dev/null +++ b/cmd/kvisor/kubebench/util.go @@ -0,0 +1,537 @@ +package kubebench + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/castai/kvisor/cmd/kvisor/kubebench/check" + "github.com/fatih/color" + "github.com/golang/glog" + "github.com/spf13/viper" +) + +// Print colors +var colors = map[check.State]*color.Color{ + check.PASS: color.New(color.FgGreen), + check.FAIL: color.New(color.FgRed), + check.WARN: color.New(color.FgYellow), + check.INFO: color.New(color.FgBlue), +} + +var ( + psFunc func(string) string + statFunc func(string) (os.FileInfo, error) + getBinariesFunc func(*viper.Viper, check.NodeType) (map[string]string, error) + 
TypeMap = map[string][]string{ + "ca": {"cafile", "defaultcafile"}, + "kubeconfig": {"kubeconfig", "defaultkubeconfig"}, + "service": {"svc", "defaultsvc"}, + "config": {"confs", "defaultconf"}, + "datadir": {"datadirs", "defaultdatadir"}, + } +) + +func init() { + psFunc = ps + statFunc = os.Stat + getBinariesFunc = getBinaries +} + +type Platform struct { + Name string + Version string +} + +func (p Platform) String() string { + return fmt.Sprintf("Platform{ Name: %s Version: %s }", p.Name, p.Version) +} + +func exitWithError(err error) { + fmt.Fprintf(os.Stderr, "\n%v\n", err) + // flush before exit non-zero + glog.Flush() + os.Exit(1) +} + +func cleanIDs(list string) map[string]bool { + list = strings.Trim(list, ",") + ids := strings.Split(list, ",") + + set := make(map[string]bool) + + for _, id := range ids { + id = strings.Trim(id, " ") + set[id] = true + } + + return set +} + +// ps execs out to the ps command; it's separated into a function so we can write tests +func ps(proc string) string { + // TODO: truncate proc to 15 chars + // See https://github.com/aquasecurity/kube-bench/issues/328#issuecomment-506813344 + glog.V(2).Info(fmt.Sprintf("ps - proc: %q", proc)) + cmd := exec.Command("/bin/ps", "-C", proc, "-o", "cmd", "--no-headers") + out, err := cmd.Output() + if err != nil { + glog.V(2).Info(fmt.Errorf("%s: %s", cmd.Args, err)) + } + + glog.V(2).Info(fmt.Sprintf("ps - returning: %q", string(out))) + return string(out) +} + +// getBinaries finds which of the set of candidate executables are running. +// It returns an error if one mandatory executable is not running. +func getBinaries(v *viper.Viper, nodetype check.NodeType) (map[string]string, error) { + binmap := make(map[string]string) + + for _, component := range v.GetStringSlice("components") { + s := v.Sub(component) + if s == nil { + continue + } + + optional := s.GetBool("optional") + bins := s.GetStringSlice("bins") + if len(bins) > 0 { + bin, err := findExecutable(bins) + if err != nil && !optional { + glog.V(1).Info(buildComponentMissingErrorMessage(nodetype, component, bins)) + return nil, fmt.Errorf("unable to detect running programs for component %q", component) + } + + // Default the executable name that we'll substitute to the name of the component + if bin == "" { + bin = component + glog.V(2).Info(fmt.Sprintf("Component %s not running", component)) + } else { + glog.V(2).Info(fmt.Sprintf("Component %s uses running binary %s", component, bin)) + } + binmap[component] = bin + } + } + + return binmap, nil +} + +// getConfigFilePath locates the config files we should be using for CIS version +func getConfigFilePath(benchmarkVersion string, filename string) (path string, err error) { + glog.V(2).Info(fmt.Sprintf("Looking for config specific CIS version %q", benchmarkVersion)) + + path = filepath.Join(cfgDir, benchmarkVersion) + file := filepath.Join(path, filename) + glog.V(2).Info(fmt.Sprintf("Looking for file: %s", file)) + + if _, err := os.Stat(file); err != nil { + glog.V(2).Infof("error accessing config file: %q error: %v\n", file, err) + return "", fmt.Errorf("no test files found <= benchmark version: %s", benchmarkVersion) + } + + return path, nil +} + +// getYamlFilesFromDir returns a list of yaml files in the specified directory, ignoring config.yaml +func getYamlFilesFromDir(path string) (names []string, err error) { + err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + _, name := filepath.Split(path) + if name != "" && name != 
"config.yaml" && filepath.Ext(name) == ".yaml" { + names = append(names, path) + } + + return nil + }) + return names, err +} + +// decrementVersion decrements the version number +// We want to decrement individually even through versions where we don't supply test files +// just in case someone wants to specify their own test files for that version +func decrementVersion(version string) string { + split := strings.Split(version, ".") + if len(split) < 2 { + return "" + } + minor, err := strconv.Atoi(split[1]) + if err != nil { + return "" + } + if minor <= 1 { + return "" + } + split[1] = strconv.Itoa(minor - 1) + return strings.Join(split, ".") +} + +// getFiles finds which of the set of candidate files exist +func getFiles(v *viper.Viper, fileType string) map[string]string { + filemap := make(map[string]string) + mainOpt := TypeMap[fileType][0] + defaultOpt := TypeMap[fileType][1] + + for _, component := range v.GetStringSlice("components") { + s := v.Sub(component) + if s == nil { + continue + } + + // See if any of the candidate files exist + file := findConfigFile(s.GetStringSlice(mainOpt)) + if file == "" { + if s.IsSet(defaultOpt) { + file = s.GetString(defaultOpt) + glog.V(2).Info(fmt.Sprintf("Using default %s file name '%s' for component %s", fileType, file, component)) + } else { + // Default the file name that we'll substitute to the name of the component + glog.V(2).Info(fmt.Sprintf("Missing %s file for %s", fileType, component)) + file = component + } + } else { + glog.V(2).Info(fmt.Sprintf("Component %s uses %s file '%s'", component, fileType, file)) + } + + filemap[component] = file + } + + return filemap +} + +// verifyBin checks that the binary specified is running +func verifyBin(bin string) bool { + // Strip any quotes + bin = strings.Trim(bin, "'\"") + + // bin could consist of more than one word + // We'll search for running processes with the first word, and then check the whole + // proc as supplied is included in the results + proc := strings.Fields(bin)[0] + out := psFunc(proc) + + // There could be multiple lines in the ps output + // The binary needs to be the first word in the ps output, except that it could be preceded by a path + // e.g. 
/usr/bin/kubelet is a match for kubelet
+	// but apiserver is not a match for kube-apiserver
+	reFirstWord := regexp.MustCompile(`^(\S*\/)*` + bin)
+	lines := strings.Split(out, "\n")
+	for _, l := range lines {
+		glog.V(3).Info(fmt.Sprintf("reFirstWord.Match(%s)", l))
+		if reFirstWord.Match([]byte(l)) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// findConfigFile looks through a list of possible config files and finds the first one that exists
+func findConfigFile(candidates []string) string {
+	for _, c := range candidates {
+		_, err := statFunc(c)
+		if err == nil {
+			return c
+		}
+		if !os.IsNotExist(err) && !strings.HasSuffix(err.Error(), "not a directory") {
+			exitWithError(fmt.Errorf("error looking for file %s: %v", c, err))
+		}
+	}
+
+	return ""
+}
+
+// findExecutable looks through a list of possible executable names and finds the first one that's running
+func findExecutable(candidates []string) (string, error) {
+	for _, c := range candidates {
+		if verifyBin(c) {
+			return c, nil
+		}
+		glog.V(1).Info(fmt.Sprintf("executable '%s' not running", c))
+	}
+
+	return "", fmt.Errorf("no candidates running")
+}
+
+func multiWordReplace(s string, subname string, sub string) string {
+	f := strings.Fields(sub)
+	if len(f) > 1 {
+		sub = "'" + sub + "'"
+	}
+
+	return strings.Replace(s, subname, sub, -1)
+}
+
+const missingKubectlKubeletMessage = `
+Unable to find the programs kubectl or kubelet in the PATH.
+These programs are used to determine which version of Kubernetes is running.
+Make sure the /usr/local/mount-from-host/bin directory is mapped to the container,
+either in the job.yaml file, or Docker command.
+
+For job.yaml:
+...
+- name: usr-bin
+  mountPath: /usr/local/mount-from-host/bin
+...
+
+For docker command:
+  docker -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl ....
+
+Alternatively, you can specify the version with --version
+  kube-bench --version ...
+`
+
+func getKubeVersion() (*KubeVersion, error) {
+	if k8sVer, err := getKubeVersionFromRESTAPI(); err == nil {
+		glog.V(2).Info(fmt.Sprintf("Kubernetes REST API Reported version: %s", k8sVer))
+		return k8sVer, nil
+	}
+
+	// These executables might not be on the user's path.
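+	// Fallback order when the REST API lookup above fails: kubectl on the
+	// PATH, then kubelet on the PATH, then a filesystem-wide search for a
+	// kubelet binary; if all of these fail, assume defaultKubeVersion.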
+	_, err := exec.LookPath("kubectl")
+	if err != nil {
+		glog.V(3).Infof("Error locating kubectl: %s", err)
+		_, err = exec.LookPath("kubelet")
+		if err != nil {
+			glog.V(3).Infof("Error locating kubelet: %s", err)
+			// Search for the kubelet binary all over the filesystem and run the first match to get the kubernetes version
+			cmd := exec.Command("/bin/sh", "-c", "`find / -type f -executable -name kubelet 2>/dev/null | grep -m1 .` --version")
+			out, err := cmd.CombinedOutput()
+			if err == nil {
+				glog.V(3).Infof("Found kubelet; queried Kubernetes version: %s", string(out))
+				return getVersionFromKubeletOutput(string(out)), nil
+			}
+
+			glog.Warning(missingKubectlKubeletMessage)
+			glog.V(1).Info("unable to find the programs kubectl or kubelet in the PATH")
+			glog.V(1).Infof("Can't detect version, assuming default %s", defaultKubeVersion)
+			return &KubeVersion{baseVersion: defaultKubeVersion}, nil
+		}
+		return getKubeVersionFromKubelet(), nil
+	}
+
+	return getKubeVersionFromKubectl(), nil
+}
+
+func getKubeVersionFromKubectl() *KubeVersion {
+	cmd := exec.Command("kubectl", "version", "-o", "json")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		glog.V(2).Infof("Failed to query kubectl: %s", err)
+		glog.V(2).Info(err)
+	}
+
+	return getVersionFromKubectlOutput(string(out))
+}
+
+func getKubeVersionFromKubelet() *KubeVersion {
+	cmd := exec.Command("kubelet", "--version")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		glog.V(2).Infof("Failed to query kubelet: %s", err)
+		glog.V(2).Info(err)
+	}
+
+	return getVersionFromKubeletOutput(string(out))
+}
+
+func getVersionFromKubectlOutput(s string) *KubeVersion {
+	glog.V(2).Infof("Kubectl output: %s", s)
+	type versionResult struct {
+		ServerVersion VersionResponse
+	}
+	vrObj := &versionResult{}
+	if err := json.Unmarshal([]byte(s), vrObj); err != nil {
+		glog.V(2).Info(err)
+		if strings.Contains(s, "The connection to the server") {
+			msg := fmt.Sprintf(`Warning: Kubernetes version was not auto-detected because kubectl could not connect to the Kubernetes server. This may be because the kubeconfig information is missing or has credentials that do not match the server. 
Assuming default version %s`, defaultKubeVersion) + fmt.Fprintln(os.Stderr, msg) + } + glog.V(1).Info(fmt.Sprintf("Unable to get Kubernetes version from kubectl, using default version: %s", defaultKubeVersion)) + return &KubeVersion{baseVersion: defaultKubeVersion} + } + sv := vrObj.ServerVersion + return &KubeVersion{ + Major: sv.Major, + Minor: sv.Minor, + GitVersion: sv.GitVersion, + } +} + +func getVersionFromKubeletOutput(s string) *KubeVersion { + glog.V(2).Infof("Kubelet output: %s", s) + serverVersionRe := regexp.MustCompile(`Kubernetes v(\d+.\d+)`) + subs := serverVersionRe.FindStringSubmatch(s) + if len(subs) < 2 { + glog.V(1).Info(fmt.Sprintf("Unable to get Kubernetes version from kubelet, using default version: %s", defaultKubeVersion)) + return &KubeVersion{baseVersion: defaultKubeVersion} + } + return &KubeVersion{baseVersion: subs[1]} +} + +func makeSubstitutions(s string, ext string, m map[string]string) (string, []string) { + substitutions := make([]string, 0) + for k, v := range m { + subst := "$" + k + ext + if v == "" { + glog.V(2).Info(fmt.Sprintf("No substitution for '%s'\n", subst)) + continue + } + glog.V(2).Info(fmt.Sprintf("Substituting %s with '%s'\n", subst, v)) + beforeS := s + s = multiWordReplace(s, subst, v) + if beforeS != s { + substitutions = append(substitutions, v) + } + } + + return s, substitutions +} + +func isEmpty(str string) bool { + return strings.TrimSpace(str) == "" +} + +func buildComponentMissingErrorMessage(nodetype check.NodeType, component string, bins []string) string { + errMessageTemplate := ` +Unable to detect running programs for component %q +The following %q programs have been searched, but none of them have been found: +%s + +These program names are provided in the config.yaml, section '%s.%s.bins' +` + + var componentRoleName, componentType string + switch nodetype { + + case check.NODE: + componentRoleName = "worker node" + componentType = "node" + case check.ETCD: + componentRoleName = "etcd node" + componentType = "etcd" + default: + componentRoleName = "master node" + componentType = "master" + } + + binList := "" + for _, bin := range bins { + binList = fmt.Sprintf("%s\t- %s\n", binList, bin) + } + + return fmt.Sprintf(errMessageTemplate, component, componentRoleName, binList, componentType, component) +} + +func getPlatformInfo() Platform { + + openShiftInfo := getOpenShiftInfo() + if openShiftInfo.Name != "" && openShiftInfo.Version != "" { + return openShiftInfo + } + + kv, err := getKubeVersion() + if err != nil { + glog.V(2).Info(err) + return Platform{} + } + return getPlatformInfoFromVersion(kv.GitVersion) +} + +func getPlatformInfoFromVersion(s string) Platform { + versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+[-+](\w+)(?:[.\-])\w+`) + subs := versionRe.FindStringSubmatch(s) + if len(subs) < 3 { + return Platform{} + } + return Platform{ + Name: subs[2], + Version: subs[1], + } +} + +func getPlatformBenchmarkVersion(platform Platform) string { + glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform) + switch platform.Name { + case "eks": + return "eks-1.2.0" + case "gke": + switch platform.Version { + case "1.15", "1.16", "1.17", "1.18", "1.19": + return "gke-1.0" + default: + return "gke-1.2.0" + } + case "aliyun": + return "ack-1.0" + case "ocp": + switch platform.Version { + case "3.10": + return "rh-0.7" + case "4.1": + return "rh-1.0" + } + case "vmware": + return "tkgi-1.2.53" + } + return "" +} + +func getOpenShiftInfo() Platform { + glog.V(1).Info("Checking for oc") + _, err := 
exec.LookPath("oc")
+
+	if err == nil {
+		cmd := exec.Command("oc", "version")
+		out, err := cmd.CombinedOutput()
+
+		if err == nil {
+			versionRe := regexp.MustCompile(`oc v(\d+\.\d+)`)
+			subs := versionRe.FindStringSubmatch(string(out))
+			if len(subs) < 1 {
+				versionRe = regexp.MustCompile(`Client Version:\s*(\d+\.\d+)`)
+				subs = versionRe.FindStringSubmatch(string(out))
+			}
+			if len(subs) > 1 {
+				glog.V(2).Infof("OCP output '%s' \nplatform is %s \nocp %v", string(out), getPlatformInfoFromVersion(string(out)), subs[1])
+				ocpBenchmarkVersion, err := getOcpValidVersion(subs[1])
+				if err == nil {
+					return Platform{Name: "ocp", Version: ocpBenchmarkVersion}
+				} else {
+					glog.V(1).Infof("Can't get getOcpValidVersion: %v", err)
+				}
+			} else {
+				glog.V(1).Infof("Can't parse version output: %v", subs)
+			}
+		} else {
+			glog.V(1).Infof("Can't use oc command: %v", err)
+		}
+	} else {
+		glog.V(1).Infof("Can't find oc command: %v", err)
+	}
+	return Platform{}
+}
+
+func getOcpValidVersion(ocpVer string) (string, error) {
+	ocpOriginal := ocpVer
+
+	for !isEmpty(ocpVer) {
+		glog.V(3).Info(fmt.Sprintf("getOcpValidVersion check for ocp: %q", ocpVer))
+		if ocpVer == "3.10" || ocpVer == "4.1" {
+			glog.V(1).Info(fmt.Sprintf("getOcpValidVersion found valid version for ocp: %q", ocpVer))
+			return ocpVer, nil
+		}
+		ocpVer = decrementVersion(ocpVer)
+	}
+
+	glog.V(1).Info(fmt.Sprintf("getOcpValidVersion unable to find a match for: %q", ocpOriginal))
+	return "", fmt.Errorf("unable to find a matching benchmark version for ocp version: %s", ocpOriginal)
+}
diff --git a/cmd/kvisor/kubebench/util_test.go b/cmd/kvisor/kubebench/util_test.go
new file mode 100644
index 00000000..fe53257d
--- /dev/null
+++ b/cmd/kvisor/kubebench/util_test.go
@@ -0,0 +1,733 @@
+// Copyright © 2017 Aqua Security Software Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
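+//
+// The tests below stub out process and filesystem lookups by swapping the
+// psFunc and statFunc hooks for fakeps and fakestat, so they can run without
+// a live cluster.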
+ +package kubebench + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strconv" + "testing" + + "github.com/castai/kvisor/cmd/kvisor/kubebench/check" + "github.com/magiconair/properties/assert" + + "github.com/spf13/viper" +) + +var ( + g string + e []error + eIndex int +) + +func fakeps(proc string) string { + return g +} + +func fakestat(file string) (os.FileInfo, error) { + err := e[eIndex] + eIndex++ + return nil, err +} + +func TestVerifyBin(t *testing.T) { + cases := []struct { + proc string + psOut string + exp bool + }{ + {proc: "single", psOut: "single", exp: true}, + {proc: "single", psOut: "", exp: false}, + {proc: "two words", psOut: "two words", exp: true}, + {proc: "two words", psOut: "", exp: false}, + {proc: "cmd", psOut: "cmd param1 param2", exp: true}, + {proc: "cmd param", psOut: "cmd param1 param2", exp: true}, + {proc: "cmd param", psOut: "cmd", exp: false}, + {proc: "cmd", psOut: "cmd x \ncmd y", exp: true}, + {proc: "cmd y", psOut: "cmd x \ncmd y", exp: true}, + {proc: "cmd", psOut: "/usr/bin/cmd", exp: true}, + {proc: "cmd", psOut: "kube-cmd", exp: false}, + {proc: "cmd", psOut: "/usr/bin/kube-cmd", exp: false}, + } + + psFunc = fakeps + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + g = c.psOut + v := verifyBin(c.proc) + if v != c.exp { + t.Fatalf("Expected %v got %v", c.exp, v) + } + }) + } +} + +func TestFindExecutable(t *testing.T) { + cases := []struct { + candidates []string // list of executables we'd consider + psOut string // fake output from ps + exp string // the one we expect to find in the (fake) ps output + expErr bool + }{ + {candidates: []string{"one", "two", "three"}, psOut: "two", exp: "two"}, + {candidates: []string{"one", "two", "three"}, psOut: "two three", exp: "two"}, + {candidates: []string{"one double", "two double", "three double"}, psOut: "two double is running", exp: "two double"}, + {candidates: []string{"one", "two", "three"}, psOut: "blah", expErr: true}, + {candidates: []string{"one double", "two double", "three double"}, psOut: "two", expErr: true}, + {candidates: []string{"apiserver", "kube-apiserver"}, psOut: "kube-apiserver", exp: "kube-apiserver"}, + {candidates: []string{"apiserver", "kube-apiserver", "hyperkube-apiserver"}, psOut: "kube-apiserver", exp: "kube-apiserver"}, + } + + psFunc = fakeps + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + g = c.psOut + e, err := findExecutable(c.candidates) + if e != c.exp { + t.Fatalf("Expected %v got %v", c.exp, e) + } + + if err == nil && c.expErr { + t.Fatalf("Expected error") + } + + if err != nil && !c.expErr { + t.Fatalf("Didn't expect error: %v", err) + } + }) + } +} + +func TestGetBinaries(t *testing.T) { + cases := []struct { + config map[string]interface{} + psOut string + exp map[string]string + expectErr bool + }{ + { + config: map[string]interface{}{"components": []string{"apiserver"}, "apiserver": map[string]interface{}{"bins": []string{"apiserver", "kube-apiserver"}}}, + psOut: "kube-apiserver", + exp: map[string]string{"apiserver": "kube-apiserver"}, + expectErr: false, + }, + { + // "thing" is not in the list of components + config: map[string]interface{}{"components": []string{"apiserver"}, "apiserver": map[string]interface{}{"bins": []string{"apiserver", "kube-apiserver"}}, "thing": map[string]interface{}{"bins": []string{"something else", "thing"}}}, + psOut: "kube-apiserver thing", + exp: map[string]string{"apiserver": "kube-apiserver"}, + expectErr: false, + }, + { + // 
"anotherthing" in list of components but doesn't have a definition + config: map[string]interface{}{"components": []string{"apiserver", "anotherthing"}, "apiserver": map[string]interface{}{"bins": []string{"apiserver", "kube-apiserver"}}, "thing": map[string]interface{}{"bins": []string{"something else", "thing"}}}, + psOut: "kube-apiserver thing", + exp: map[string]string{"apiserver": "kube-apiserver"}, + expectErr: false, + }, + { + // more than one component + config: map[string]interface{}{"components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"bins": []string{"apiserver", "kube-apiserver"}}, "thing": map[string]interface{}{"bins": []string{"something else", "thing"}}}, + psOut: "kube-apiserver \nthing", + exp: map[string]string{"apiserver": "kube-apiserver", "thing": "thing"}, + expectErr: false, + }, + { + // default binary to component name + config: map[string]interface{}{"components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"bins": []string{"apiserver", "kube-apiserver"}}, "thing": map[string]interface{}{"bins": []string{"something else", "thing"}, "optional": true}}, + psOut: "kube-apiserver \notherthing some params", + exp: map[string]string{"apiserver": "kube-apiserver", "thing": "thing"}, + expectErr: false, + }, + { + // missing mandatory component + config: map[string]interface{}{"components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"bins": []string{"apiserver", "kube-apiserver"}}, "thing": map[string]interface{}{"bins": []string{"something else", "thing"}, "optional": true}}, + psOut: "otherthing some params", + exp: map[string]string{"apiserver": "kube-apiserver", "thing": "thing"}, + expectErr: true, + }, + } + + v := viper.New() + psFunc = fakeps + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + g = c.psOut + for k, val := range c.config { + v.Set(k, val) + } + m, err := getBinaries(v, check.MASTER) + if c.expectErr { + if err == nil { + t.Fatal("Got nil Expected error") + } + } else if !reflect.DeepEqual(m, c.exp) { + t.Fatalf("Got %v\nExpected %v", m, c.exp) + } + }) + } +} + +func TestMultiWordReplace(t *testing.T) { + cases := []struct { + input string + sub string + subname string + output string + }{ + {input: "Here's a file with no substitutions", sub: "blah", subname: "blah", output: "Here's a file with no substitutions"}, + {input: "Here's a file with a substitution", sub: "blah", subname: "substitution", output: "Here's a file with a blah"}, + {input: "Here's a file with multi-word substitutions", sub: "multi word", subname: "multi-word", output: "Here's a file with 'multi word' substitutions"}, + {input: "Here's a file with several several substitutions several", sub: "blah", subname: "several", output: "Here's a file with blah blah substitutions blah"}, + } + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + s := multiWordReplace(c.input, c.subname, c.sub) + if s != c.output { + t.Fatalf("Expected %s got %s", c.output, s) + } + }) + } +} + +func Test_getVersionFromKubectlOutput(t *testing.T) { + ver := getVersionFromKubectlOutput(`{ + "serverVersion": { + "major": "1", + "minor": "8", + "gitVersion": "v1.8.0" + } +}`) + if ver.BaseVersion() != "1.8" { + t.Fatalf("Expected 1.8 got %s", ver.BaseVersion()) + } + + ver = getVersionFromKubectlOutput("Something completely different") + if ver.BaseVersion() != defaultKubeVersion { + t.Fatalf("Expected %s got %s", defaultKubeVersion, ver.BaseVersion()) + } +} + +func 
TestFindConfigFile(t *testing.T) { + cases := []struct { + input []string + statResults []error + exp string + }{ + {input: []string{"myfile"}, statResults: []error{nil}, exp: "myfile"}, + {input: []string{"thisfile", "thatfile"}, statResults: []error{os.ErrNotExist, nil}, exp: "thatfile"}, + {input: []string{"thisfile", "thatfile"}, statResults: []error{os.ErrNotExist, os.ErrNotExist}, exp: ""}, + {input: []string{"thisfile", "/etc/dummy/thatfile"}, statResults: []error{os.ErrNotExist, errors.New("stat /etc/dummy/thatfile: not a directory")}, exp: ""}, + } + + statFunc = fakestat + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + e = c.statResults + eIndex = 0 + conf := findConfigFile(c.input) + if conf != c.exp { + t.Fatalf("Got %s expected %s", conf, c.exp) + } + }) + } +} + +func TestGetConfigFiles(t *testing.T) { + cases := []struct { + config map[string]interface{} + exp map[string]string + statResults []error + }{ + { + config: map[string]interface{}{"components": []string{"apiserver"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}}, + statResults: []error{os.ErrNotExist, nil}, + exp: map[string]string{"apiserver": "kube-apiserver"}, + }, + { + // Component "thing" isn't included in the list of components + config: map[string]interface{}{ + "components": []string{"apiserver"}, + "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, + statResults: []error{os.ErrNotExist, nil}, + exp: map[string]string{"apiserver": "kube-apiserver"}, + }, + { + // More than one component + config: map[string]interface{}{ + "components": []string{"apiserver", "thing"}, + "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, + statResults: []error{os.ErrNotExist, nil, nil}, + exp: map[string]string{"apiserver": "kube-apiserver", "thing": "/my/file/thing"}, + }, + { + // Default thing to specified default config + config: map[string]interface{}{ + "components": []string{"apiserver", "thing"}, + "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}, "defaultconf": "another/thing"}, + }, + statResults: []error{os.ErrNotExist, nil, os.ErrNotExist}, + exp: map[string]string{"apiserver": "kube-apiserver", "thing": "another/thing"}, + }, + { + // Default thing to component name + config: map[string]interface{}{ + "components": []string{"apiserver", "thing"}, + "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, + statResults: []error{os.ErrNotExist, nil, os.ErrNotExist}, + exp: map[string]string{"apiserver": "kube-apiserver", "thing": "thing"}, + }, + } + + v := viper.New() + statFunc = fakestat + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + for k, val := range c.config { + v.Set(k, val) + } + e = c.statResults + eIndex = 0 + + m := getFiles(v, "config") + if !reflect.DeepEqual(m, c.exp) { + t.Fatalf("Got %v\nExpected %v", m, c.exp) + } + }) + } +} + +func TestGetServiceFiles(t *testing.T) { + cases := []struct { + config map[string]interface{} + exp map[string]string + statResults []error + }{ + { + config: map[string]interface{}{ + "components": []string{"kubelet"}, + "kubelet": 
map[string]interface{}{"svc": []string{"kubelet", "10-kubeadm.conf"}}, + }, + statResults: []error{os.ErrNotExist, nil}, + exp: map[string]string{"kubelet": "10-kubeadm.conf"}, + }, + { + // Component "thing" isn't included in the list of components + config: map[string]interface{}{ + "components": []string{"kubelet"}, + "kubelet": map[string]interface{}{"svc": []string{"kubelet", "10-kubeadm.conf"}}, + "thing": map[string]interface{}{"svc": []string{"/my/file/thing"}}, + }, + statResults: []error{os.ErrNotExist, nil}, + exp: map[string]string{"kubelet": "10-kubeadm.conf"}, + }, + { + // More than one component + config: map[string]interface{}{ + "components": []string{"kubelet", "thing"}, + "kubelet": map[string]interface{}{"svc": []string{"kubelet", "10-kubeadm.conf"}}, + "thing": map[string]interface{}{"svc": []string{"/my/file/thing"}}, + }, + statResults: []error{os.ErrNotExist, nil, nil}, + exp: map[string]string{"kubelet": "10-kubeadm.conf", "thing": "/my/file/thing"}, + }, + { + // Default thing to specified default service + config: map[string]interface{}{ + "components": []string{"kubelet", "thing"}, + "kubelet": map[string]interface{}{"svc": []string{"kubelet", "10-kubeadm.conf"}}, + "thing": map[string]interface{}{"svc": []string{"/my/file/thing"}, "defaultsvc": "another/thing"}, + }, + statResults: []error{os.ErrNotExist, nil, os.ErrNotExist}, + exp: map[string]string{"kubelet": "10-kubeadm.conf", "thing": "another/thing"}, + }, + { + // Default thing to component name + config: map[string]interface{}{ + "components": []string{"kubelet", "thing"}, + "kubelet": map[string]interface{}{"svc": []string{"kubelet", "10-kubeadm.conf"}}, + "thing": map[string]interface{}{"svc": []string{"/my/file/thing"}}, + }, + statResults: []error{os.ErrNotExist, nil, os.ErrNotExist}, + exp: map[string]string{"kubelet": "10-kubeadm.conf", "thing": "thing"}, + }, + } + + v := viper.New() + statFunc = fakestat + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + for k, val := range c.config { + v.Set(k, val) + } + e = c.statResults + eIndex = 0 + + m := getFiles(v, "service") + if !reflect.DeepEqual(m, c.exp) { + t.Fatalf("Got %v\nExpected %v", m, c.exp) + } + }) + } +} + +func TestGetDatadirFiles(t *testing.T) { + var err error + datadir, err := ioutil.TempDir("", "kube-bench-test-etcd-data-dir") + if err != nil { + t.Fatalf("Failed to create temp directory") + } + defer os.RemoveAll(datadir) + + cases := []struct { + config map[string]interface{} + exp map[string]string + statResults []error + }{ + { + config: map[string]interface{}{ + "components": []string{"etcd"}, + "etcd": map[string]interface{}{"datadirs": []string{datadir}, + "defaultdatadir": "/var/lib/etcd/default.etcd"}, + }, + statResults: []error{nil}, + exp: map[string]string{"etcd": datadir}, + }, + // fallback to defaultdatadir + { + config: map[string]interface{}{ + "components": []string{"etcd"}, + "etcd": map[string]interface{}{"datadirs": []string{"/path/to/etcd/data.etcd"}, + "defaultdatadir": "/var/lib/etcd/default.etcd"}, + }, + statResults: []error{os.ErrNotExist}, + exp: map[string]string{"etcd": "/var/lib/etcd/default.etcd"}, + }, + } + + v := viper.New() + statFunc = fakestat + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + for k, val := range c.config { + v.Set(k, val) + } + e = c.statResults + eIndex = 0 + m := getFiles(v, "datadir") + if !reflect.DeepEqual(m, c.exp) { + t.Fatalf("Got %v\nExpected %v", m, c.exp) + } + }) + } +} + +func TestMakeSubsitutions(t 
*testing.T) { + cases := []struct { + input string + subst map[string]string + exp string + expectedSubs []string + }{ + {input: "Replace $thisbin", subst: map[string]string{"this": "that"}, exp: "Replace that", expectedSubs: []string{"that"}}, + {input: "Replace $thisbin", subst: map[string]string{"this": "that", "here": "there"}, exp: "Replace that", expectedSubs: []string{"that"}}, + {input: "Replace $thisbin and $herebin", subst: map[string]string{"this": "that", "here": "there"}, exp: "Replace that and there", expectedSubs: []string{"that", "there"}}, + } + for _, c := range cases { + t.Run(c.input, func(t *testing.T) { + s, subs := makeSubstitutions(c.input, "bin", c.subst) + if s != c.exp { + t.Fatalf("Got %s expected %s", s, c.exp) + } + sort.Strings(subs) + assert.Equal(t, c.expectedSubs, subs) + }) + } +} + +func TestGetConfigFilePath(t *testing.T) { + var err error + cfgDir, err = ioutil.TempDir("", "kube-bench-test") + if err != nil { + t.Fatalf("Failed to create temp directory") + } + defer os.RemoveAll(cfgDir) + d := filepath.Join(cfgDir, "cis-1.4") + err = os.Mkdir(d, 0766) + if err != nil { + t.Fatalf("Failed to create temp dir") + } + err = ioutil.WriteFile(filepath.Join(d, "master.yaml"), []byte("hello world"), 0666) + if err != nil { + t.Logf("Failed to create temp file") + } + + cases := []struct { + benchmarkVersion string + succeed bool + exp string + }{ + {benchmarkVersion: "cis-1.4", succeed: true, exp: d}, + {benchmarkVersion: "cis-1.5", succeed: false, exp: ""}, + {benchmarkVersion: "1.1", succeed: false, exp: ""}, + } + + for _, c := range cases { + t.Run(c.benchmarkVersion, func(t *testing.T) { + path, err := getConfigFilePath(c.benchmarkVersion, "/master.yaml") + if c.succeed { + if err != nil { + t.Fatalf("Error %v", err) + } + if path != c.exp { + t.Fatalf("Got %s expected %s", path, c.exp) + } + } else { + if err == nil { + t.Fatalf("Expected Error, but none") + } + } + }) + } +} + +func TestDecrementVersion(t *testing.T) { + cases := []struct { + kubeVersion string + succeed bool + exp string + }{ + {kubeVersion: "1.13", succeed: true, exp: "1.12"}, + {kubeVersion: "1.15", succeed: true, exp: "1.14"}, + {kubeVersion: "1.11", succeed: true, exp: "1.10"}, + {kubeVersion: "1.1", succeed: true, exp: ""}, + {kubeVersion: "invalid", succeed: false, exp: ""}, + } + for _, c := range cases { + rv := decrementVersion(c.kubeVersion) + if c.succeed { + if c.exp != rv { + t.Fatalf("decrementVersion(%q) - Got %q expected %s", c.kubeVersion, rv, c.exp) + } + } else { + if len(rv) > 0 { + t.Fatalf("decrementVersion(%q) - Expected empty string but Got %s", c.kubeVersion, rv) + } + } + } +} + +func TestGetYamlFilesFromDir(t *testing.T) { + cfgDir, err := ioutil.TempDir("", "kube-bench-test") + if err != nil { + t.Fatalf("Failed to create temp directory") + } + defer os.RemoveAll(cfgDir) + + d := filepath.Join(cfgDir, "cis-1.4") + err = os.Mkdir(d, 0766) + if err != nil { + t.Fatalf("Failed to create temp dir") + } + + err = ioutil.WriteFile(filepath.Join(d, "something.yaml"), []byte("hello world"), 0666) + if err != nil { + t.Fatalf("error writing file %v", err) + } + err = ioutil.WriteFile(filepath.Join(d, "config.yaml"), []byte("hello world"), 0666) + if err != nil { + t.Fatalf("error writing file %v", err) + } + + files, err := getYamlFilesFromDir(d) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(files) != 1 { + t.Fatalf("Expected to find one file, found %d", len(files)) + } + + if files[0] != filepath.Join(d, "something.yaml") { + 
t.Fatalf("Expected to find something.yaml, found %s", files[0]) + } +} + +func Test_getPlatformNameFromKubectlOutput(t *testing.T) { + type args struct { + s string + } + tests := []struct { + name string + args args + want Platform + }{ + { + name: "eks", + args: args{s: "v1.17.9-eks-4c6976"}, + want: Platform{Name: "eks", Version: "1.17"}, + }, + { + name: "gke", + args: args{s: "v1.17.6-gke.1"}, + want: Platform{Name: "gke", Version: "1.17"}, + }, + { + name: "ack", + args: args{s: "v1.18.8-aliyun.1"}, + want: Platform{Name: "aliyun", Version: "1.18"}, + }, + { + name: "unknown", + args: args{s: "v1.17.6"}, + want: Platform{}, + }, + { + name: "empty string", + args: args{s: ""}, + want: Platform{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getPlatformInfoFromVersion(tt.args.s) + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_getPlatformBenchmarkVersion(t *testing.T) { + type args struct { + platform Platform + } + tests := []struct { + name string + args args + want string + }{ + { + name: "eks", + args: args{ + platform: Platform{Name: "eks"}, + }, + want: "eks-1.2.0", + }, + { + name: "gke 1.19", + args: args{ + platform: Platform{Name: "gke", Version: "1.19"}, + }, + want: "gke-1.0", + }, + { + name: "gke 1.20", + args: args{ + platform: Platform{Name: "gke", Version: "1.20"}, + }, + want: "gke-1.2.0", + }, + { + name: "gke 1.22", + args: args{ + platform: Platform{Name: "gke", Version: "1.22"}, + }, + want: "gke-1.2.0", + }, + { + name: "aliyun", + args: args{ + platform: Platform{Name: "aliyun"}, + }, + want: "ack-1.0", + }, + { + name: "unknown", + args: args{ + platform: Platform{Name: "rh"}, + }, + want: "", + }, + { + name: "empty", + args: args{ + platform: Platform{}, + }, + want: "", + }, + { + name: "openshift3", + args: args{ + platform: Platform{Name: "ocp", Version: "3.10"}, + }, + want: "rh-0.7", + }, + { + name: "openshift4", + args: args{ + platform: Platform{Name: "ocp", Version: "4.1"}, + }, + want: "rh-1.0", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getPlatformBenchmarkVersion(tt.args.platform); got != tt.want { + t.Errorf("getPlatformBenchmarkVersion() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_getOcpValidVersion(t *testing.T) { + cases := []struct { + openShiftVersion string + succeed bool + exp string + }{ + {openShiftVersion: "3.11", succeed: true, exp: "3.10"}, + {openShiftVersion: "3.10", succeed: true, exp: "3.10"}, + {openShiftVersion: "2.9", succeed: false, exp: ""}, + {openShiftVersion: "4.1", succeed: true, exp: "4.1"}, + {openShiftVersion: "4.5", succeed: true, exp: "4.1"}, + {openShiftVersion: "4.6", succeed: true, exp: "4.1"}, + {openShiftVersion: "invalid", succeed: false, exp: ""}, + } + for _, c := range cases { + ocpVer, _ := getOcpValidVersion(c.openShiftVersion) + if c.succeed { + if c.exp != ocpVer { + t.Errorf("getOcpValidVersion(%q) - Got %q expected %s", c.openShiftVersion, ocpVer, c.exp) + } + } else { + if len(ocpVer) > 0 { + t.Errorf("getOcpValidVersion(%q) - Expected empty string but Got %s", c.openShiftVersion, ocpVer) + } + } + } +} diff --git a/cmd/kvisor/main.go b/cmd/kvisor/main.go new file mode 100644 index 00000000..d766d9de --- /dev/null +++ b/cmd/kvisor/main.go @@ -0,0 +1,38 @@ +package main + +import ( + "fmt" + "os" + + "github.com/castai/kvisor/cmd/kvisor/agent" + "github.com/castai/kvisor/cmd/kvisor/imgcollector" + kubebench2 "github.com/castai/kvisor/cmd/kvisor/kubebench" + "github.com/spf13/cobra" +) + +// 
These should be set via `go build` during a release. +var ( + GitCommit = "undefined" + GitRef = "no-ref" + Version = "local" +) + +func main() { + root := cobra.Command{ + Use: "kvisor", + } + + kubeBenchCmd := kubebench2.NewCommand() + kubeBenchCmd.AddCommand(kubebench2.NewRunCommand()) + + root.AddCommand( + agent.NewCommand(Version, GitCommit, GitRef), + imgcollector.NewCommand(Version, GitCommit), + kubeBenchCmd, + ) + + if err := root.Execute(); err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } +} diff --git a/config/config.go b/config/config.go index e30ca062..05b42771 100644 --- a/config/config.go +++ b/config/config.go @@ -86,7 +86,6 @@ type ImageScan struct { } type ImageScanImage struct { - Name string `envconfig:"IMAGE_SCAN_IMAGE_NAME" yaml:"name"` PullPolicy string `envconfig:"IMAGE_SCAN_IMAGE_PULL_POLICY" yaml:"pullPolicy"` } @@ -103,7 +102,6 @@ type KubeBench struct { } type KubeBenchImage struct { - Name string `envconfig:"KUBE_BENCH_IMAGE_NAME" yaml:"name"` PullPolicy string `envconfig:"KUBE_BENCH_IMAGE_PULL_POLICY" yaml:"pullPolicy"` } @@ -174,9 +172,6 @@ func Load(configPath string) (Config, error) { } } if cfg.ImageScan.Enabled { - if cfg.ImageScan.Image.Name == "" { - return cfg, required("IMAGE_SCAN_IMAGE_NAME") - } if cfg.ImageScan.Image.PullPolicy == "" { cfg.ImageScan.Image.PullPolicy = "IfNotPresent" } @@ -221,8 +216,7 @@ func Load(configPath string) (Config, error) { if cfg.KubeBench.ScanInterval == 0 { cfg.KubeBench.ScanInterval = 30 * time.Second } - if cfg.KubeBench.Image.Name == "" { - cfg.KubeBench.Image.Name = "ghcr.io/castai/kvisor/kube-bench:v0.8.0" + if cfg.KubeBench.Image.PullPolicy == "" { cfg.KubeBench.Image.PullPolicy = "IfNotPresent" } } diff --git a/config/config_test.go b/config/config_test.go index 2f243252..35ac91eb 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -74,7 +74,6 @@ func newTestConfig() Config { MaxConcurrentScans: 3, InitDelay: 60 * time.Second, Image: ImageScanImage{ - Name: "collector-img", PullPolicy: "IfNotPresent", }, Mode: "mode", @@ -94,7 +93,6 @@ func newTestConfig() Config { Enabled: true, ScanInterval: 15 * time.Second, Image: KubeBenchImage{ - Name: "ghcr.io/castai/kvisor/kube-bench:v0.8.", PullPolicy: "IfNotPresent", }, }, diff --git a/e2e/e2e.go b/e2e/e2e.go index 9b2ab724..752305d3 100644 --- a/e2e/e2e.go +++ b/e2e/e2e.go @@ -171,10 +171,8 @@ func installChart(ns, imageTag string) ([]byte, error) { fmt.Printf("installing kvisor chart with image tag %q", imageTag) podIP := os.Getenv("POD_IP") apiURL := fmt.Sprintf("http://%s:8090", podIP) - collectorImage := fmt.Sprintf("ghcr.io/castai/kvisor/kvisor-imgcollector:%s", imageTag) agentRepo := "ghcr.io/castai/kvisor/kvisor" if imageTag == "local" { - collectorImage = "kvisor-imgcollector:local" agentRepo = "kvisor" } //nolint:gosec @@ -183,7 +181,7 @@ func installChart(ns, imageTag string) ([]byte, error) { -f ./charts/castai-kvisor/ci/test-values.yaml \ --set image.repository=%s \ --set image.tag=%s \ - --set structuredConfig.imageScan.image.name=%s \ + --set structuredConfig.provider=gke \ --set structuredConfig.imageScan.mode=hostfs \ --set structuredConfig.imageScan.initDelay=10s \ --set structuredConfig.linter.scanInterval=5s \ @@ -192,7 +190,7 @@ func installChart(ns, imageTag string) ([]byte, error) { --set structuredConfig.kubeBench.enabled=true \ --set structuredConfig.kubeClient.useProtobuf=true \ --set castai.apiURL=%s \ - --wait --timeout=1m`, ns, agentRepo, imageTag, collectorImage, apiURL)) + --wait --timeout=1m`, ns, agentRepo, 
imageTag, apiURL)) return cmd.CombinedOutput() } diff --git a/e2e/run.sh b/e2e/run.sh index dec2de62..698dde2e 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -20,13 +20,9 @@ kind load docker-image kvisor-e2e:local --name $KIND_CONTEXT if [ "$IMAGE_TAG" == "local" ] then - GOOS=linux GOARCH=$GOARCH CGO_ENABLED=0 go build -o bin/castai-kvisor-$GOARCH ./cmd/agent - docker build . -t kvisor:local -f Dockerfile.agent + GOOS=linux GOARCH=$GOARCH CGO_ENABLED=0 go build -o bin/castai-kvisor-$GOARCH ./cmd/kvisor + docker build . -t kvisor:local -f Dockerfile kind load docker-image kvisor:local --name $KIND_CONTEXT - - GOOS=linux GOARCH=$GOARCH CGO_ENABLED=0 go build -o bin/castai-imgcollector-$GOARCH ./cmd/imgcollector - docker build . -t kvisor-imgcollector:local -f Dockerfile.imgcollector - kind load docker-image kvisor-imgcollector:local --name $KIND_CONTEXT fi # Deploy e2e resources. diff --git a/go.mod b/go.mod index b63e4902..88ea8080 100644 --- a/go.mod +++ b/go.mod @@ -9,13 +9,15 @@ require ( github.com/aquasecurity/trivy v0.35.0 github.com/aws/aws-sdk-go-v2/config v1.18.3 github.com/aws/aws-sdk-go-v2/service/eks v1.22.1 - github.com/aws/smithy-go v1.13.4 + github.com/aws/smithy-go v1.18.1 github.com/bombsimon/logrusr/v4 v4.0.0 github.com/castai/image-analyzer v0.2.0 github.com/cenkalti/backoff/v4 v4.1.3 github.com/containerd/containerd v1.7.8 github.com/davecgh/go-spew v1.1.1 + github.com/fatih/color v1.13.0 github.com/go-resty/resty/v2 v2.7.0 + github.com/golang/glog v1.1.2 github.com/golang/mock v1.6.0 github.com/google/go-containerregistry v0.12.0 github.com/google/uuid v1.3.1 @@ -24,16 +26,21 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/json-iterator/go v1.1.12 github.com/kelseyhightower/envconfig v1.4.0 + github.com/magiconair/properties v1.8.6 + github.com/onsi/ginkgo v1.16.5 github.com/open-policy-agent/cert-controller v0.7.0 github.com/prometheus/client_golang v1.14.0 github.com/samber/lo v1.33.0 github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.6.1 + github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.8.1 golang.org/x/oauth2 v0.11.0 golang.org/x/sync v0.3.0 golang.stackrox.io/kube-linter v0.4.1-0.20221021125313-bd11843210d1 google.golang.org/api v0.126.0 gopkg.in/inf.v0 v0.9.1 + gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.26.11 k8s.io/apimachinery v0.27.1 @@ -69,11 +76,11 @@ require ( github.com/aquasecurity/go-dep-parser v0.0.0-20221114145626-35ef808901e8 // indirect github.com/aquasecurity/trivy-db v0.0.0-20220627104749-930461748b63 // indirect github.com/aws/aws-sdk-go v1.44.136 // indirect - github.com/aws/aws-sdk-go-v2 v1.17.1 // indirect + github.com/aws/aws-sdk-go-v2 v1.23.5 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.13.3 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 // indirect @@ -145,11 +152,11 @@ require ( github.com/liamg/jfather v0.0.7 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40 // 
indirect - github.com/magiconair/properties v1.8.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/masahiro331/go-disk v0.0.0-20220919035250-c8da316f91ac // indirect github.com/masahiro331/go-ext4-filesystem v0.0.0-20221016160854-4b40d7ee6193 // indirect github.com/masahiro331/go-xfs-filesystem v0.0.0-20221127135739-051c25f1becd // indirect + github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/microsoft/go-rustaudit v0.0.0-20220808201409-204dfee52032 // indirect @@ -189,10 +196,8 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.13.0 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect @@ -225,7 +230,6 @@ require ( google.golang.org/grpc v1.59.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect helm.sh/helm/v3 v3.10.3 // indirect k8s.io/apiextensions-apiserver v0.26.1 // indirect k8s.io/cli-runtime v0.25.3 // indirect @@ -262,4 +266,4 @@ replace github.com/chzyer/logex v1.1.10 => github.com/chzyer/logex v1.2.0 replace github.com/containerd/containerd => github.com/containerd/containerd v1.6.1-0.20220706215228-681aaf68b7dc -replace github.com/go-enry/go-license-detector/v4 v4.3.0 => ./cmd/imgcollector/stub/licensing +replace github.com/go-enry/go-license-detector/v4 v4.3.0 => ./cmd/kvisor/imgcollector/stub/licensing diff --git a/go.sum b/go.sum index 3439ceb8..ddc7bc59 100644 --- a/go.sum +++ b/go.sum @@ -178,8 +178,9 @@ github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:W github.com/aws/aws-sdk-go v1.44.136 h1:J1KJJssa8pjU8jETYUxwRS37KTcxjACfKd9GK8t+5ZU= github.com/aws/aws-sdk-go v1.44.136/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= -github.com/aws/aws-sdk-go-v2 v1.17.1 h1:02c72fDJr87N8RAC2s3Qu0YuvMRZKNZJ9F+lAehCazk= github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw= +github.com/aws/aws-sdk-go-v2 v1.23.5 h1:xK6C4udTyDMd82RFvNkDQxtAd00xlzFUtX4fF2nMZyg= +github.com/aws/aws-sdk-go-v2 v1.23.5/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds= github.com/aws/aws-sdk-go-v2/config v1.18.3 h1:3kfBKcX3votFX84dm00U8RGA1sCCh3eRMOGzg5dCWfU= github.com/aws/aws-sdk-go-v2/config v1.18.3/go.mod h1:BYdrbeCse3ZnOD5+2/VE/nATOK8fEUpBtmPMdKSyhMU= github.com/aws/aws-sdk-go-v2/credentials v1.13.3 h1:ur+FHdp4NbVIv/49bUjBW+FE7e57HOo03ELodttmagk= @@ -187,11 +188,13 @@ github.com/aws/aws-sdk-go-v2/credentials v1.13.3/go.mod h1:/rOMmqYBcFfNbRPU0iN9I github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 h1:E3PXZSI3F2bzyj6XxUXdTIfvp425HHhwKsFvmzBwHgs= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 h1:nBO/RFxeq/IS5G9Of+ZrgucRciie2qpLy++3UGZ+q2E= github.com/aws/aws-sdk-go-v2/internal/configsources 
v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 h1:8GVZIR0y6JRIUNSYI1xAMF4HDfV8H/bOsZ/8AD/uY5Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8/go.mod h1:rwBfu0SoUkBUZndVgPZKAD9Y2JigaZtRP68unRiYToQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 h1:oRHDrwCTVT8ZXi4sr9Ld+EXk7N/KGssOr2ygNeojEhw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 h1:ZE2ds/qeBkhk3yqYvS3CDCFNvd9ir5hMjlVStLZWrvM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8/go.mod h1:/lAPPymDYL023+TS6DJmjuL42nxix2AvEvfjqOBRODk= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 h1:Mza+vlnZr+fPKFKRq/lKGVvM6B/8ZZmNdEopOwSQLms= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26/go.mod h1:Y2OJ+P+MC1u1VKnavT+PshiEuGPyh/7DqxoDNij4/bg= github.com/aws/aws-sdk-go-v2/service/eks v1.22.1 h1:f07Bk+xMm0Q8PCzvrBg8Bd6m67CTvZSxQWB0H7ZEJOU= @@ -205,8 +208,9 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcB github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 h1:60SJ4lhvn///8ygCzYy2l53bFW/Q15bVfyjyAWo6zuw= github.com/aws/aws-sdk-go-v2/service/sts v1.17.5/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4= github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.13.4 h1:/RN2z1txIJWeXeOkzX+Hk/4Uuvv7dWtCjbmVJcrskyk= github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c= +github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -513,6 +517,8 @@ github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8 github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -804,10 +810,12 @@ github.com/masahiro331/go-ext4-filesystem v0.0.0-20221016160854-4b40d7ee6193/go. 
github.com/masahiro331/go-xfs-filesystem v0.0.0-20221127135739-051c25f1becd h1:jOFGJ9IFmR9jbm06nZzSR9xdd5clVbRcK55yGNhqMYM= github.com/masahiro331/go-xfs-filesystem v0.0.0-20221127135739-051c25f1becd/go.mod h1:QKBZqdn6teT0LK3QhAf3K6xakItd1LonOShOEC44idQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= diff --git a/imagescan/controller.go b/imagescan/controller.go index 5952d84b..1128d6e4 100644 --- a/imagescan/controller.go +++ b/imagescan/controller.go @@ -9,13 +9,13 @@ import ( "sync" "time" + imgcollectorconfig "github.com/castai/kvisor/cmd/kvisor/imgcollector/config" "github.com/samber/lo" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "github.com/castai/kvisor/castai" - imgcollectorconfig "github.com/castai/kvisor/cmd/imgcollector/config" "github.com/castai/kvisor/config" "github.com/castai/kvisor/kube" "github.com/castai/kvisor/metrics" @@ -33,7 +33,7 @@ func NewController( imageScanner imageScanner, client castaiClient, k8sVersionMinor int, - podOwnerGetter podOwnerGetter, + kubeController kubeController, ) *Controller { ctx, cancel := context.WithCancel(context.Background()) log = log.WithField("component", "imagescan") @@ -42,7 +42,8 @@ func NewController( cancel: cancel, imageScanner: imageScanner, client: client, - delta: newDeltaState(podOwnerGetter), + kubeController: kubeController, + delta: newDeltaState(kubeController), log: log, cfg: cfg, k8sVersionMinor: k8sVersionMinor, @@ -63,6 +64,7 @@ type Controller struct { delta *deltaState imageScanner imageScanner client castaiClient + kubeController kubeController log logrus.FieldLogger cfg config.ImageScan k8sVersionMinor int @@ -310,6 +312,11 @@ func (s *Controller) scanImage(ctx context.Context, img *image) (rerr error) { metrics.ObserveScanDuration(metrics.ScanTypeImage, start) }() + collectorImageDetails, found := s.kubeController.GetKvisorImageDetails() + if !found { + return errors.New("kvisor image details not found") + } + return s.imageScanner.ScanImage(ctx, ScanImageParams{ ImageName: img.name, ImageID: img.id, @@ -322,6 +329,7 @@ func (s *Controller) scanImage(ctx context.Context, img *image) (rerr error) { WaitDurationAfterCompletion: 30 * time.Second, Architecture: img.architecture, Os: img.os, + CollectorImageDetails: collectorImageDetails, }) } diff --git a/imagescan/controller_test.go b/imagescan/controller_test.go index 26ebdfde..0bdec45c 100644 --- a/imagescan/controller_test.go +++ b/imagescan/controller_test.go @@ -8,6 +8,8 @@ import ( "testing" "time" + imgcollectorconfig "github.com/castai/kvisor/cmd/kvisor/imgcollector/config" + "github.com/castai/kvisor/kube" "github.com/google/uuid" "github.com/sirupsen/logrus" 
"github.com/stretchr/testify/mock" @@ -18,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/castai/kvisor/castai" - imgcollectorconfig "github.com/castai/kvisor/cmd/imgcollector/config" "github.com/castai/kvisor/config" ) @@ -236,6 +237,10 @@ func TestSubscriber(t *testing.T) { WaitDurationAfterCompletion: 30 * time.Second, Architecture: defaultImageArch, Os: defaultImageOs, + CollectorImageDetails: kube.KvisorImageDetails{ + ImageName: "kvisor", + ImagePullSecrets: nil, + }, }, ngnxImage) r.Len(client.getImagesResourcesChanges(), 1) r.Len(client.getImagesResourcesChanges()[0].Images, 3) @@ -413,7 +418,7 @@ func TestSubscriber(t *testing.T) { scanner := &mockImageScanner{} scanner.On("ScanImage", mock.Anything, mock.Anything).Return(nil) client := &mockCastaiClient{} - podOwnerGetter := &mockPodOwnerGetter{} + podOwnerGetter := &mockKubeController{} sub := NewController(log, cfg, scanner, client, 21, podOwnerGetter) sub.initialScansDelay = 1 * time.Millisecond sub.timeGetter = func() time.Time { @@ -856,7 +861,7 @@ func TestController_findBestNodeAndMode(t *testing.T) { func newTestController(log logrus.FieldLogger, cfg config.ImageScan) *Controller { scanner := &mockImageScanner{} client := &mockCastaiClient{} - podOwnerGetter := &mockPodOwnerGetter{} + podOwnerGetter := &mockKubeController{} return NewController(log, cfg, scanner, client, 21, podOwnerGetter) } @@ -879,10 +884,17 @@ func (m *mockImageScanner) getScanImageParams() []ScanImageParams { return m.imgs } -type mockPodOwnerGetter struct { +type mockKubeController struct { +} + +func (m *mockKubeController) GetKvisorImageDetails() (kube.KvisorImageDetails, bool) { + return kube.KvisorImageDetails{ + ImageName: "kvisor", + ImagePullSecrets: nil, + }, true } -func (m *mockPodOwnerGetter) GetPodOwnerID(pod *corev1.Pod) string { +func (m *mockKubeController) GetPodOwnerID(pod *corev1.Pod) string { return string(pod.UID) } diff --git a/imagescan/delta.go b/imagescan/delta.go index 5882169b..4eed45af 100644 --- a/imagescan/delta.go +++ b/imagescan/delta.go @@ -6,6 +6,7 @@ import ( "strings" "time" + imgcollectorconfig "github.com/castai/kvisor/cmd/kvisor/imgcollector/config" "github.com/samber/lo" "gopkg.in/inf.v0" corev1 "k8s.io/api/core/v1" @@ -13,7 +14,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "github.com/castai/kvisor/castai" - imgcollectorconfig "github.com/castai/kvisor/cmd/imgcollector/config" "github.com/castai/kvisor/kube" ) @@ -24,8 +24,9 @@ var ( const defaultImageOs = "linux" const defaultImageArch = "amd64" -type podOwnerGetter interface { +type kubeController interface { GetPodOwnerID(pod *corev1.Pod) string + GetKvisorImageDetails() (kube.KvisorImageDetails, bool) } func newImage() *image { @@ -41,9 +42,9 @@ func newImage() *image { } } -func newDeltaState(podOwnerGetter podOwnerGetter) *deltaState { +func newDeltaState(kubeController kubeController) *deltaState { return &deltaState{ - podOwnerGetter: podOwnerGetter, + kubeController: kubeController, queue: make(chan deltaQueueItem, 1000), images: map[string]*image{}, nodes: make(map[string]*node), @@ -56,7 +57,7 @@ type deltaQueueItem struct { } type deltaState struct { - podOwnerGetter podOwnerGetter + kubeController kubeController // queue is informers received k8s objects but not yet applied to delta. // This allows to have lock free access to delta state during image scan. @@ -153,7 +154,7 @@ func (d *deltaState) upsertImages(pod *corev1.Pod) { containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...) 
podID := string(pod.UID) // Get the resource id of Deployment, ReplicaSet, StatefulSet, Job, CronJob. - ownerResourceID := d.podOwnerGetter.GetPodOwnerID(pod) + ownerResourceID := d.kubeController.GetPodOwnerID(pod) for _, cont := range containers { cs, found := lo.Find(containerStatuses, func(v corev1.ContainerStatus) bool { @@ -221,7 +222,7 @@ func (d *deltaState) handlePodDelete(pod *corev1.Pod) { delete(n.podIDs, podID) } - ownerResourceID := d.podOwnerGetter.GetPodOwnerID(pod) + ownerResourceID := d.kubeController.GetPodOwnerID(pod) if owner, found := img.owners[ownerResourceID]; found { delete(owner.podIDs, podID) if len(owner.podIDs) == 0 { diff --git a/imagescan/delta_test.go b/imagescan/delta_test.go index f4d2d27a..2e5bc31c 100644 --- a/imagescan/delta_test.go +++ b/imagescan/delta_test.go @@ -402,5 +402,5 @@ func TestDelta(t *testing.T) { } func newTestDelta() *deltaState { - return newDeltaState(&mockPodOwnerGetter{}) + return newDeltaState(&mockKubeController{}) } diff --git a/imagescan/scanner.go b/imagescan/scanner.go index aee99359..2fcad09b 100644 --- a/imagescan/scanner.go +++ b/imagescan/scanner.go @@ -10,6 +10,8 @@ import ( "strings" "time" + imgcollectorconfig "github.com/castai/kvisor/cmd/kvisor/imgcollector/config" + "github.com/castai/kvisor/kube" "github.com/samber/lo" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -21,7 +23,6 @@ import ( "k8s.io/client-go/kubernetes" batchv1typed "k8s.io/client-go/kubernetes/typed/batch/v1" - imgcollectorconfig "github.com/castai/kvisor/cmd/imgcollector/config" "github.com/castai/kvisor/config" "github.com/castai/kvisor/log" ) @@ -66,6 +67,7 @@ type ScanImageParams struct { WaitDurationAfterCompletion time.Duration Architecture string Os string + CollectorImageDetails kube.KvisorImageDetails } func (s *Scanner) ScanImage(ctx context.Context, params ScanImageParams) (rerr error) { @@ -248,6 +250,7 @@ func (s *Scanner) ScanImage(ctx context.Context, params ScanImageParams) (rerr e vols, tolerations, s.cfg.ImageScan, + params.CollectorImageDetails, ) jobs := s.client.BatchV1().Jobs(s.cfg.PodNamespace) @@ -385,6 +388,7 @@ func scanJobSpec( vol volumesAndMounts, tolerations []corev1.Toleration, cfg config.ImageScan, + collectorImageDetails kube.KvisorImageDetails, ) *batchv1.Job { job := &batchv1.Job{ TypeMeta: metav1.TypeMeta{ @@ -434,6 +438,7 @@ func scanJobSpec( Tolerations: tolerations, AutomountServiceAccountToken: lo.ToPtr(false), ServiceAccountName: cfg.ServiceAccountName, + ImagePullSecrets: collectorImageDetails.ImagePullSecrets, Containers: []corev1.Container{ { SecurityContext: &corev1.SecurityContext{ @@ -441,8 +446,11 @@ func scanJobSpec( RunAsNonRoot: lo.ToPtr(true), AllowPrivilegeEscalation: lo.ToPtr(false), }, - Name: "collector", - Image: cfg.Image.Name, + Name: "collector", + Image: collectorImageDetails.ImageName, + Args: []string{ + "analyze-image", + }, ImagePullPolicy: corev1.PullPolicy(cfg.Image.PullPolicy), Env: envVars, VolumeMounts: vol.mounts, diff --git a/imagescan/scanner_test.go b/imagescan/scanner_test.go index 86306e7b..5abe0a14 100644 --- a/imagescan/scanner_test.go +++ b/imagescan/scanner_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/castai/kvisor/kube" "github.com/samber/lo" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" @@ -35,9 +36,7 @@ func TestScanner(t *testing.T) { PodIP: "10.10.5.77", PodNamespace: ns, ImageScan: config.ImageScan{ - Image: config.ImageScanImage{ - Name: "imgcollector:1.0.0", - }, + Image: config.ImageScanImage{}, APIUrl: 
"http://kvisor:6060", DockerOptionsPath: "/etc/docker/config.json", CPURequest: "500m", @@ -61,6 +60,9 @@ func TestScanner(t *testing.T) { ResourceIDs: []string{"p1", "p2"}, Architecture: "amd64", Os: "linux", + CollectorImageDetails: kube.KvisorImageDetails{ + ImageName: "imgcollector:1.0.0", + }, }) r.NoError(err) @@ -127,6 +129,9 @@ func TestScanner(t *testing.T) { { Name: "collector", Image: "imgcollector:1.0.0", + Args: []string{ + "analyze-image", + }, Env: []corev1.EnvVar{ { Name: "GOMEMLIMIT", @@ -262,6 +267,9 @@ func TestScanner(t *testing.T) { NodeName: "n1", ResourceIDs: []string{"p1", "p2"}, DeleteFinishedJob: true, + CollectorImageDetails: kube.KvisorImageDetails{ + ImageName: "imgcollector:1.0.0", + }, }) r.NoError(err) @@ -303,9 +311,7 @@ func TestScanner(t *testing.T) { scanner := NewImageScanner(client, config.Config{ PodNamespace: ns, ImageScan: config.ImageScan{ - Image: config.ImageScanImage{ - Name: "imgcollector:1.0.0", - }, + Image: config.ImageScanImage{}, }, }) scanner.jobCheckInterval = 1 * time.Microsecond @@ -318,6 +324,9 @@ func TestScanner(t *testing.T) { NodeName: "n1", ResourceIDs: []string{"p1", "p2"}, WaitForCompletion: true, + CollectorImageDetails: kube.KvisorImageDetails{ + ImageName: "imgcollector:1.0.0", + }, }) r.ErrorContains(err, "[type=Ready, status=False, reason=no cpu], [type=PodScheduled, status=False, reason=no cpu]") }) diff --git a/kube/controller.go b/kube/controller.go index 252e398f..b7b09eec 100644 --- a/kube/controller.go +++ b/kube/controller.go @@ -31,6 +31,7 @@ func NewController( log logrus.FieldLogger, f informers.SharedInformerFactory, k8sVersion version.Version, + kvisorNamespace string, ) *Controller { typeInformerMap := map[reflect.Type]cache.SharedInformer{ reflect.TypeOf(&corev1.Node{}): f.Core().V1().Nodes().Informer(), @@ -62,6 +63,7 @@ func NewController( informerFactory: f, informers: typeInformerMap, podsBuffSyncInterval: 5 * time.Second, + kvisorNamespace: kvisorNamespace, replicaSets: make(map[types.UID]*appsv1.ReplicaSet), deployments: make(map[types.UID]*appsv1.Deployment), jobs: make(map[types.UID]*batchv1.Job), @@ -77,6 +79,7 @@ type Controller struct { subscribers []ObjectSubscriber podsBuffSyncInterval time.Duration + kvisorNamespace string deltasMu sync.RWMutex replicaSets map[types.UID]*appsv1.ReplicaSet @@ -169,6 +172,48 @@ func (c *Controller) GetPodOwnerID(pod *corev1.Pod) string { return string(pod.UID) } +type KvisorImageDetails struct { + ImageName string + ImagePullSecrets []corev1.LocalObjectReference +} + +// GetKvisorImageDetails returns kvisor image details. +// This is used for image analyzer and kube-bench dynamic jobs to schedule using the same image. 
+func (c *Controller) GetKvisorImageDetails() (KvisorImageDetails, bool) { + spec, found := c.getKvisorDeploymentSpec() + if !found { + c.log.Warn("kvisor deployment not found") + return KvisorImageDetails{}, false + } + var imageName string + for _, container := range spec.Template.Spec.Containers { + if container.Name == "kvisor" { + imageName = container.Image + break + } + } + if imageName == "" { + c.log.Warn("kvisor container image not found") + return KvisorImageDetails{}, false + } + return KvisorImageDetails{ + ImageName: imageName, + ImagePullSecrets: spec.Template.Spec.ImagePullSecrets, + }, true +} + +func (c *Controller) getKvisorDeploymentSpec() (appsv1.DeploymentSpec, bool) { + c.deltasMu.RLock() + defer c.deltasMu.RUnlock() + + for _, deployment := range c.deployments { + if deployment.Namespace == c.kvisorNamespace && deployment.Name == "castai-kvisor" { + return deployment.Spec, true + } + } + return appsv1.DeploymentSpec{}, false +} + func (c *Controller) runSubscriber(ctx context.Context, subscriber ObjectSubscriber) error { requiredInformerTypes := subscriber.RequiredInformers() syncs := make([]cache.InformerSynced, 0, len(requiredInformerTypes)) diff --git a/kube/controller_test.go b/kube/controller_test.go index 0f53d86d..29a5aa9f 100644 --- a/kube/controller_test.go +++ b/kube/controller_test.go @@ -70,7 +70,7 @@ func TestController(t *testing.T) { newTestSubscriber(log.WithField("sub", "sub1")), newTestSubscriber(log.WithField("sub", "sub2")), } - ctrl := NewController(log, informersFactory, version.Version{MinorInt: 22}) + ctrl := NewController(log, informersFactory, version.Version{MinorInt: 22}, "castai-agent") ctrl.AddSubscribers(testSubs...) ctrl.podsBuffSyncInterval = 1 * time.Millisecond @@ -247,7 +247,7 @@ func TestController(t *testing.T) { informersFactory := informers.NewSharedInformerFactory(clientset, 0) testSub := newTestSubscriber(log.WithField("sub", "sub1")) - ctrl := NewController(log, informersFactory, version.Version{MinorInt: 22}) + ctrl := NewController(log, informersFactory, version.Version{MinorInt: 22}, "castai-agent") ctrl.podsBuffSyncInterval = 10 * time.Millisecond ctrl.AddSubscribers(testSub) diff --git a/linters/kubebench/controller.go b/linters/kubebench/controller.go index 437df82a..2ab1e4ad 100644 --- a/linters/kubebench/controller.go +++ b/linters/kubebench/controller.go @@ -38,6 +38,10 @@ const ( maxConcurrentJobs = 1 ) +type kubeController interface { + GetKvisorImageDetails() (kube.KvisorImageDetails, bool) +} + func NewController( log logrus.FieldLogger, client kubernetes.Interface, @@ -47,6 +51,7 @@ func NewController( scanInterval time.Duration, castClient castai.Client, logsReader log.PodLogProvider, + kubeController kubeController, scannedNodes []string, ) *Controller { nodeCache, _ := lru.New(1000) @@ -63,6 +68,7 @@ func NewController( provider: provider, castClient: castClient, logsProvider: logsReader, + kubeController: kubeController, scanInterval: scanInterval, scannedNodes: nodeCache, finishedJobDeleteWaitDuration: 10 * time.Second, @@ -79,6 +85,7 @@ type Controller struct { delta *nodeDeltaState provider string logsProvider log.PodLogProvider + kubeController kubeController scanInterval time.Duration finishedJobDeleteWaitDuration time.Duration scannedNodes *lru.Cache @@ -280,11 +287,16 @@ func (s *Controller) createKubebenchJob(ctx context.Context, node *corev1.Node, specFn := resolveSpec(s.provider, node) jobSpec := specFn(node.GetName(), jobName) - // Set image from config. + // Set image. 
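+ // The job reuses the agent's own image and pull secrets, resolved at runtime from the castai-kvisor deployment; the static KUBE_BENCH_IMAGE_NAME config knob is gone (see config.go above).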
+ imageDetails, found := s.kubeController.GetKvisorImageDetails() + if !found { + return nil, errors.New("kvisor image details not found") + } cont := jobSpec.Spec.Template.Spec.Containers[0] - cont.Image = s.cfg.Image.Name + cont.Image = imageDetails.ImageName cont.ImagePullPolicy = corev1.PullPolicy(s.cfg.Image.PullPolicy) jobSpec.Spec.Template.Spec.Containers[0] = cont + jobSpec.Spec.Template.Spec.ImagePullSecrets = imageDetails.ImagePullSecrets job, err := s.client.BatchV1(). Jobs(s.castaiNamespace). diff --git a/linters/kubebench/controller_test.go b/linters/kubebench/controller_test.go index cd9375f0..1f18e3b5 100644 --- a/linters/kubebench/controller_test.go +++ b/linters/kubebench/controller_test.go @@ -11,6 +11,8 @@ import ( "github.com/castai/kvisor/castai" "github.com/castai/kvisor/config" + "github.com/castai/kvisor/kube" + "github.com/castai/kvisor/log" "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/sirupsen/logrus" @@ -21,7 +23,6 @@ import ( "k8s.io/client-go/kubernetes/fake" mock_castai "github.com/castai/kvisor/castai/mock" - agentlog "github.com/castai/kvisor/log" ) func TestSubscriber(t *testing.T) { @@ -38,6 +39,8 @@ func TestSubscriber(t *testing.T) { log.SetOutput(&logOutput) logProvider := newMockLogProvider(readReport()) + kubeCtrl := &mockKubeController{} + castaiNamespace := "castai-sec" ctrl := NewController( log, @@ -48,6 +51,7 @@ func TestSubscriber(t *testing.T) { 5*time.Millisecond, mockCast, logProvider, + kubeCtrl, nil, ) ctrl.finishedJobDeleteWaitDuration = 0 @@ -114,6 +118,7 @@ func TestSubscriber(t *testing.T) { var logOutput bytes.Buffer log.SetOutput(&logOutput) logProvider := newMockLogProvider(readReport()) + kubeCtrl := &mockKubeController{} castaiNamespace := "castai-sec" ctrl := NewController( @@ -125,6 +130,7 @@ func TestSubscriber(t *testing.T) { 5*time.Millisecond, mockCast, logProvider, + kubeCtrl, nil, ) nodeID := types.UID(uuid.NewString()) @@ -170,6 +176,7 @@ func TestSubscriber(t *testing.T) { var logOutput bytes.Buffer log.SetOutput(&logOutput) logProvider := newMockLogProvider(readReport()) + kubeCtrl := &mockKubeController{} castaiNamespace := "castai-sec" ctrl := NewController( @@ -181,6 +188,7 @@ func TestSubscriber(t *testing.T) { 5*time.Millisecond, mockCast, logProvider, + kubeCtrl, nil, ) nodeID := types.UID(uuid.NewString()) @@ -256,12 +264,12 @@ func TestNodeGroupKey(t *testing.T) { r.NotEqual(key1, key2) } -type mockProvider struct { - logs []byte +func newMockLogProvider(b []byte) log.PodLogProvider { + return &mockProvider{logs: b} } -func newMockLogProvider(b []byte) agentlog.PodLogProvider { - return &mockProvider{logs: b} +type mockProvider struct { + logs []byte } func (m *mockProvider) GetLogReader(_ context.Context, _, _ string) (io.ReadCloser, error) { @@ -274,3 +282,17 @@ func readReport() []byte { return reportBytes } + +type mockKubeController struct { +} + +func (m *mockKubeController) GetKvisorImageDetails() (kube.KvisorImageDetails, bool) { + return kube.KvisorImageDetails{ + ImageName: "kvisor", + ImagePullSecrets: nil, + }, true +} + +func (m *mockKubeController) GetPodOwnerID(pod *corev1.Pod) string { + return string(pod.UID) +} diff --git a/linters/kubebench/spec/aks.go b/linters/kubebench/spec/aks.go index b34500e3..7f6e6f14 100644 --- a/linters/kubebench/spec/aks.go +++ b/linters/kubebench/spec/aks.go @@ -37,8 +37,13 @@ func AKS(nodeName, jobName string) *batchv1.Job { ReadOnlyRootFilesystem: lo.ToPtr(true), AllowPrivilegeEscalation: lo.ToPtr(false), }, - Command: []string{ - 
"kube-bench", "run", "--targets", "node", "--benchmark", "aks-1.3", "--json", + Args: []string{ + "kube-bench", + "--config-dir", "/etc/kubebench-rules/", + "run", + "--targets", "node", + "--benchmark", "aks-1.3", + "--json", }, VolumeMounts: []corev1.VolumeMount{ { diff --git a/linters/kubebench/spec/eks.go b/linters/kubebench/spec/eks.go index 88f1aa4f..26a37965 100644 --- a/linters/kubebench/spec/eks.go +++ b/linters/kubebench/spec/eks.go @@ -37,13 +37,12 @@ func EKS(nodeName, jobName string) *batchv1.Job { ReadOnlyRootFilesystem: lo.ToPtr(true), AllowPrivilegeEscalation: lo.ToPtr(false), }, - Command: []string{ + Args: []string{ "kube-bench", + "--config-dir", "/etc/kubebench-rules/", "run", - "--targets", - "node", - "--benchmark", - "eks-1.3.0", + "--targets", "node", + "--benchmark", "eks-1.3.0", "--json", }, VolumeMounts: []corev1.VolumeMount{ diff --git a/linters/kubebench/spec/gke.go b/linters/kubebench/spec/gke.go index 9e8ff4c1..16d1709a 100644 --- a/linters/kubebench/spec/gke.go +++ b/linters/kubebench/spec/gke.go @@ -37,13 +37,13 @@ func GKE(nodeName, jobName string) *batchv1.Job { ReadOnlyRootFilesystem: lo.ToPtr(true), AllowPrivilegeEscalation: lo.ToPtr(false), }, - Command: []string{ + Args: []string{ "kube-bench", + "--config-dir", "/etc/kubebench-rules/", "run", "--targets", "node,policies,managedservices", - "--benchmark", - "gke-1.4.0", + "--benchmark", "gke-1.4.0", "--json", }, VolumeMounts: []corev1.VolumeMount{ diff --git a/linters/kubebench/spec/master.go b/linters/kubebench/spec/master.go index 985108c4..29f678d8 100644 --- a/linters/kubebench/spec/master.go +++ b/linters/kubebench/spec/master.go @@ -52,8 +52,12 @@ func Master(nodeName, jobName string) *batchv1.Job { ReadOnlyRootFilesystem: lo.ToPtr(true), AllowPrivilegeEscalation: lo.ToPtr(false), }, - Command: []string{ - "kube-bench", "run", "--targets", "master", "--json", + Args: []string{ + "kube-bench", + "--config-dir", "/etc/kubebench-rules/", + "run", + "--targets", "master", + "--json", }, VolumeMounts: []corev1.VolumeMount{ { diff --git a/linters/kubebench/spec/node.go b/linters/kubebench/spec/node.go index dc0294c4..9ba00650 100644 --- a/linters/kubebench/spec/node.go +++ b/linters/kubebench/spec/node.go @@ -37,8 +37,13 @@ func Node(nodeName, jobName string) *batchv1.Job { ReadOnlyRootFilesystem: lo.ToPtr(true), AllowPrivilegeEscalation: lo.ToPtr(false), }, - Command: []string{ - "kube-bench", "run", "--targets", "node", "--json", + Args: []string{ + "kube-bench", + "--config-dir", "/etc/kubebench-rules/", + "run", + "--targets", + "node", + "--json", }, VolumeMounts: []corev1.VolumeMount{ {