From 1d9ea08498d7160d825ff96d7299b6aae6764234 Mon Sep 17 00:00:00 2001 From: Anders Eknert Date: Wed, 18 Oct 2023 17:27:18 +0200 Subject: [PATCH] Add Regal for linting Rego (#194) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hi there, GKE friends 👋😃 This PR introduces [Regal](https://github.com/StyraInc/regal) for linting Rego policy for the project. I've limited linting (and fixes) to the `gke-policies-v2` directory here, but I'd be happy to expand this to the v1 directory as well if you think that'd be valuable. Along with a configuration file to disable some of the linters that'd require a more concentrated effort to adhere to, I've fixed the following reported violations, which while mostly style-related, seem like they'd be good to enforce for modern Rego: * [use-some-for-output-vars](https://docs.styra.com/regal/rules/use-some-for-output-vars) * [non-raw-regex-pattern](https://docs.styra.com/regal/rules/idiomatic/non-raw-regex-pattern) * [prefer-package-imports](https://docs.styra.com/regal/rules/imports/prefer-package-imports) * [chained-rule-body](https://docs.styra.com/regal/rules/style/chained-rule-body) * [identically-named-tests](https://docs.styra.com/regal/rules/testing/identically-named-tests) * [use-assignment-operator](https://docs.styra.com/regal/rules/style/use-assignment-operator) I've also included linting as a step for the GitHub Action that runs tests of all the policies. Let me know what you think! 
Signed-off-by: Anders Eknert --- .github/workflows/policy-test.yml | 9 ++++-- .regal/config.yaml | 32 +++++++++++++++++++ gke-policies-v2/AUTHORING.md | 2 +- gke-policies-v2/policy/autopilot_cluster.rego | 4 +-- .../policy/cluster_binary_authorization.rego | 2 +- .../policy/cluster_gce_csi_driver.rego | 2 +- .../policy/cluster_gce_csi_driver_test.rego | 2 +- .../policy/cluster_maintenance_window.rego | 2 +- .../policy/cluster_receive_updates.rego | 10 +++--- .../policy/cluster_release_channels.rego | 4 +-- .../policy/control_plane_access.rego | 8 ++--- ...ol_plane_disable_legacy_authorization.rego | 2 +- .../policy/control_plane_endpoint.rego | 4 +-- .../policy/control_plane_redundancy.rego | 6 ++-- gke-policies-v2/policy/ilb_subsetting.rego | 2 +- .../policy/monitoring_and_logging.rego | 2 +- .../policy/nap_forbid_default_sa.rego | 2 +- .../policy/nap_forbid_single_zone.rego | 2 +- .../policy/nap_integrity_monitoring.rego | 4 +-- gke-policies-v2/policy/nap_use_cos.rego | 4 +-- gke-policies-v2/policy/network_policies.rego | 2 +- .../policy/node_local_dns_cache.rego | 2 +- .../policy/node_pool_autorepair.rego | 9 +++--- .../policy/node_pool_autoscaling.rego | 5 +-- .../policy/node_pool_autoupgrade.rego | 6 ++-- .../policy/node_pool_forbid_default_sa.rego | 3 +- .../node_pool_integrity_monitoring.rego | 7 ++-- .../policy/node_pool_multi_zone.rego | 7 ++-- .../policy/node_pool_secure_boot.rego | 7 ++-- gke-policies-v2/policy/node_pool_use_cos.rego | 7 ++-- .../policy/node_pool_version_skew.rego | 2 +- .../policy/node_rbac_security_group.rego | 4 +-- gke-policies-v2/policy/private_cluster.rego | 2 +- gke-policies-v2/policy/secret_encryption.rego | 2 +- gke-policies-v2/policy/shielded_nodes.rego | 2 +- .../policy/vpc_native_cluster.rego | 3 +- gke-policies-v2/policy/workload_identity.rego | 2 +- gke-policies-v2/rule/cluster/location.rego | 4 +-- .../scalability/limit_namespaces.rego | 8 ++--- .../scalability/limit_secrets_encryption.rego | 8 ++--- 
.../scalability/limit_services.rego | 8 ++--- .../scalability/limit_services_per_ns.rego | 8 ++--- .../scalability/limits_containers.rego | 12 +++---- gke-policies-v2/scalability/limits_hpas.rego | 10 +++--- gke-policies-v2/scalability/limits_nodes.rego | 26 +++++++-------- .../limits_nodes_per_pool_zone.rego | 8 ++--- gke-policies-v2/scalability/limits_pods.rego | 12 +++---- .../scalability/limits_pods_per_node.rego | 4 +-- 48 files changed, 165 insertions(+), 120 deletions(-) create mode 100644 .regal/config.yaml diff --git a/.github/workflows/policy-test.yml b/.github/workflows/policy-test.yml index 1e7fb4d5..976bad82 100644 --- a/.github/workflows/policy-test.yml +++ b/.github/workflows/policy-test.yml @@ -31,7 +31,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup OPA uses: open-policy-agent/setup-opa@v2 with: @@ -39,4 +39,9 @@ jobs: - name: Run Policy tests - v1 policies run: opa test ${{ env.GKE_POLICY_DIRECTORY_V1 }} -v - name: Run Policy tests - v2 policies - run: opa test ${{ env.GKE_POLICY_DIRECTORY_V2 }} -v \ No newline at end of file + run: opa test ${{ env.GKE_POLICY_DIRECTORY_V2 }} -v + - name: Setup Regal + uses: StyraInc/setup-regal@v0.2.0 + with: + version: v0.10.1 + - run: regal lint --format github ${{ env.GKE_POLICY_DIRECTORY_V2 }} diff --git a/.regal/config.yaml b/.regal/config.yaml new file mode 100644 index 00000000..f2fbfb99 --- /dev/null +++ b/.regal/config.yaml @@ -0,0 +1,32 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +rules: + idiomatic: + no-defined-entrypoint: + # not applicable to this project + level: ignore + style: + detached-metadata: + # style preference only + level: ignore + line-length: + level: ignore + opa-fmt: + level: ignore + prefer-some-in-iteration: + level: ignore + testing: + test-outside-test-package: + level: ignore diff --git a/gke-policies-v2/AUTHORING.md b/gke-policies-v2/AUTHORING.md index 51b31149..cebd5e5f 100644 --- a/gke-policies-v2/AUTHORING.md +++ b/gke-policies-v2/AUTHORING.md @@ -76,7 +76,7 @@ Below is an example of a valid GKE Policy file. # group: Security package gke.policy.control_plane_access -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/autopilot_cluster.rego b/gke-policies-v2/policy/autopilot_cluster.rego index f87262c4..a4e759f7 100644 --- a/gke-policies-v2/policy/autopilot_cluster.rego +++ b/gke-policies-v2/policy/autopilot_cluster.rego @@ -19,7 +19,7 @@ # group: Management # severity: Medium # recommendation: > -# Autopilot mode (recommended): GKE manages the underlying infrastructure such as node configuration, +# Autopilot mode (recommended): GKE manages the underlying infrastructure such as node configuration, # autoscaling, auto-upgrades, baseline security configurations, and baseline networking configuration. 
# externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/choose-cluster-mode # sccCategory: AUTOPILOT_DISABLED @@ -27,7 +27,7 @@ package gke.policy.autopilot -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/cluster_binary_authorization.rego b/gke-policies-v2/policy/cluster_binary_authorization.rego index dbdbcecf..04c10e49 100644 --- a/gke-policies-v2/policy/cluster_binary_authorization.rego +++ b/gke-policies-v2/policy/cluster_binary_authorization.rego @@ -31,7 +31,7 @@ package gke.policy.cluster_binary_authorization -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/cluster_gce_csi_driver.rego b/gke-policies-v2/policy/cluster_gce_csi_driver.rego index faa96575..0e70d2c8 100644 --- a/gke-policies-v2/policy/cluster_gce_csi_driver.rego +++ b/gke-policies-v2/policy/cluster_gce_csi_driver.rego @@ -28,7 +28,7 @@ package gke.policy.cluster_gce_csi_driver -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/cluster_gce_csi_driver_test.rego b/gke-policies-v2/policy/cluster_gce_csi_driver_test.rego index af66a152..89fc78ce 100644 --- a/gke-policies-v2/policy/cluster_gce_csi_driver_test.rego +++ b/gke-policies-v2/policy/cluster_gce_csi_driver_test.rego @@ -18,7 +18,7 @@ test_gce_csi_driver_addon_empty { not valid with input as {"data": {"gke": {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{}}}}} } -test_gce_csi_driver_addon_empty { +test_gce_csi_driver_addon_disabled { not valid with input as {"data": {"gke": {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":false}}}}} } diff --git a/gke-policies-v2/policy/cluster_maintenance_window.rego b/gke-policies-v2/policy/cluster_maintenance_window.rego index b8041dbf..5f970e1d 100644 --- a/gke-policies-v2/policy/cluster_maintenance_window.rego +++ 
b/gke-policies-v2/policy/cluster_maintenance_window.rego @@ -30,7 +30,7 @@ package gke.policy.cluster_maintenance_window -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/cluster_receive_updates.rego b/gke-policies-v2/policy/cluster_receive_updates.rego index 64bff614..86327044 100644 --- a/gke-policies-v2/policy/cluster_receive_updates.rego +++ b/gke-policies-v2/policy/cluster_receive_updates.rego @@ -30,16 +30,18 @@ package gke.policy.cluster_receive_updates -default valid = false +default valid := false valid { count(violation) == 0 } violation[msg] { - not input.data.gke.notification_config.pubsub.enabled + not input.data.gke.notification_config.pubsub.enabled msg := "Pub/Sub notifications are not enabled" -} { - not input.data.gke.notification_config.pubsub.topic +} + +violation[msg] { + not input.data.gke.notification_config.pubsub.topic msg := "Pub/Sub topic is not configured" } diff --git a/gke-policies-v2/policy/cluster_release_channels.rego b/gke-policies-v2/policy/cluster_release_channels.rego index 3116c2de..cf24a393 100644 --- a/gke-policies-v2/policy/cluster_release_channels.rego +++ b/gke-policies-v2/policy/cluster_release_channels.rego @@ -32,13 +32,13 @@ package gke.policy.cluster_release_channels -default valid = false +default valid := false valid { count(violation) == 0 } violation[msg] { - not input.data.gke.release_channel.channel + not input.data.gke.release_channel.channel msg := "GKE cluster is not enrolled in release channel" } diff --git a/gke-policies-v2/policy/control_plane_access.rego b/gke-policies-v2/policy/control_plane_access.rego index 7d55cde4..8bbda4ea 100644 --- a/gke-policies-v2/policy/control_plane_access.rego +++ b/gke-policies-v2/policy/control_plane_access.rego @@ -33,7 +33,7 @@ package gke.policy.control_plane_access -default valid = false +default valid := false valid { count(violation) == 0 @@ -41,15 +41,15 @@ valid { violation[msg] { not 
input.data.gke.master_authorized_networks_config.enabled - msg := "GKE cluster has not enabled master authorized networks configuration" + msg := "GKE cluster has not enabled master authorized networks configuration" } violation[msg] { not input.data.gke.master_authorized_networks_config.cidr_blocks - msg := "GKE cluster's master authorized networks has no CIDR blocks element" + msg := "GKE cluster's master authorized networks has no CIDR blocks element" } violation[msg] { count(input.data.gke.master_authorized_networks_config.cidr_blocks) < 1 - msg := "GKE cluster's master authorized networks has no CIDR blocks defined" + msg := "GKE cluster's master authorized networks has no CIDR blocks defined" } diff --git a/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego b/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego index 39037bd7..ef58814e 100644 --- a/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego +++ b/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego @@ -31,7 +31,7 @@ package gke.policy.disable_legacy_authorization -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/control_plane_endpoint.rego b/gke-policies-v2/policy/control_plane_endpoint.rego index 8699d042..9e0921ba 100644 --- a/gke-policies-v2/policy/control_plane_endpoint.rego +++ b/gke-policies-v2/policy/control_plane_endpoint.rego @@ -31,7 +31,7 @@ package gke.policy.control_plane_endpoint -default valid = false +default valid := false valid { count(violation) == 0 @@ -39,5 +39,5 @@ valid { violation[msg] { not input.data.gke.private_cluster_config.enable_private_endpoint - msg := "GKE cluster has not enabled private endpoint" + msg := "GKE cluster has not enabled private endpoint" } diff --git a/gke-policies-v2/policy/control_plane_redundancy.rego b/gke-policies-v2/policy/control_plane_redundancy.rego index 676235ed..5489a19b 100644 --- 
a/gke-policies-v2/policy/control_plane_redundancy.rego +++ b/gke-policies-v2/policy/control_plane_redundancy.rego @@ -27,9 +27,9 @@ package gke.policy.control_plane_redundancy -import data.gke.rule.cluster.location.regional +import data.gke.rule.cluster.location -default valid = false +default valid := false valid { count(violation) == 0 @@ -41,6 +41,6 @@ violation[msg] { } violation[msg] { - not regional(input.data.gke.location) + not location.regional(input.data.gke.location) msg := sprintf("Invalid GKE Control plane location %q (not regional)", [input.data.gke.location]) } diff --git a/gke-policies-v2/policy/ilb_subsetting.rego b/gke-policies-v2/policy/ilb_subsetting.rego index d311415c..f1a9c171 100644 --- a/gke-policies-v2/policy/ilb_subsetting.rego +++ b/gke-policies-v2/policy/ilb_subsetting.rego @@ -28,7 +28,7 @@ package gke.policy.enable_ilb_subsetting -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/monitoring_and_logging.rego b/gke-policies-v2/policy/monitoring_and_logging.rego index df0c21fc..9cc35778 100644 --- a/gke-policies-v2/policy/monitoring_and_logging.rego +++ b/gke-policies-v2/policy/monitoring_and_logging.rego @@ -35,7 +35,7 @@ package gke.policy.logging_and_monitoring -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/nap_forbid_default_sa.rego b/gke-policies-v2/policy/nap_forbid_default_sa.rego index bfb877c8..b9a54818 100644 --- a/gke-policies-v2/policy/nap_forbid_default_sa.rego +++ b/gke-policies-v2/policy/nap_forbid_default_sa.rego @@ -32,7 +32,7 @@ package gke.policy.nap_forbid_default_sa -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/nap_forbid_single_zone.rego b/gke-policies-v2/policy/nap_forbid_single_zone.rego index 1e9ce7ab..5e775705 100644 --- a/gke-policies-v2/policy/nap_forbid_single_zone.rego +++ 
b/gke-policies-v2/policy/nap_forbid_single_zone.rego @@ -31,7 +31,7 @@ package gke.policy.nap_forbid_single_zone -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/nap_integrity_monitoring.rego b/gke-policies-v2/policy/nap_integrity_monitoring.rego index c59a29c8..32f7fba8 100644 --- a/gke-policies-v2/policy/nap_integrity_monitoring.rego +++ b/gke-policies-v2/policy/nap_integrity_monitoring.rego @@ -36,7 +36,7 @@ package gke.policy.nap_integrity_monitoring -default valid = false +default valid := false valid { count(violation) == 0 @@ -45,6 +45,6 @@ valid { violation[msg] { input.data.gke.autoscaling.enable_node_autoprovisioning == true input.data.gke.autoscaling.autoprovisioning_node_pool_defaults.shielded_instance_config.enable_integrity_monitoring == false - + msg := "GKE cluster Node Auto-Provisioning configuration use integrity monitoring" } diff --git a/gke-policies-v2/policy/nap_use_cos.rego b/gke-policies-v2/policy/nap_use_cos.rego index 5f725714..69109f57 100644 --- a/gke-policies-v2/policy/nap_use_cos.rego +++ b/gke-policies-v2/policy/nap_use_cos.rego @@ -33,7 +33,7 @@ package gke.policy.nap_use_cos import future.keywords.in -default valid = false +default valid := false valid { count(violation) == 0 @@ -42,6 +42,6 @@ valid { violation[msg] { input.data.gke.autoscaling.enable_node_autoprovisioning == true not lower(input.data.gke.autoscaling.autoprovisioning_node_pool_defaults.image_type) in { "cos", "cos_containerd"} - + msg := "GKE cluster Node Auto-Provisioning configuration use Container-Optimized OS" } diff --git a/gke-policies-v2/policy/network_policies.rego b/gke-policies-v2/policy/network_policies.rego index f065adaa..c8abace9 100644 --- a/gke-policies-v2/policy/network_policies.rego +++ b/gke-policies-v2/policy/network_policies.rego @@ -31,7 +31,7 @@ package gke.policy.network_policies_engine -default valid = false +default valid := false valid { count(violation) == 0 diff --git 
a/gke-policies-v2/policy/node_local_dns_cache.rego b/gke-policies-v2/policy/node_local_dns_cache.rego index 71d97027..6a0bf18d 100644 --- a/gke-policies-v2/policy/node_local_dns_cache.rego +++ b/gke-policies-v2/policy/node_local_dns_cache.rego @@ -28,7 +28,7 @@ package gke.policy.node_local_dns_cache -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/node_pool_autorepair.rego b/gke-policies-v2/policy/node_pool_autorepair.rego index b39e8bd6..af6f1d83 100644 --- a/gke-policies-v2/policy/node_pool_autorepair.rego +++ b/gke-policies-v2/policy/node_pool_autorepair.rego @@ -32,15 +32,14 @@ package gke.policy.node_pool_autorepair -default valid = false +default valid := false valid { count(violation) == 0 } -violation[msg] { +violation[msg] { + some pool not input.data.gke.node_pools[pool].management.auto_repair msg := sprintf("autorepair not set for GKE node pool %q", [input.data.gke.node_pools[pool].name]) -} - - +} diff --git a/gke-policies-v2/policy/node_pool_autoscaling.rego b/gke-policies-v2/policy/node_pool_autoscaling.rego index 676dad42..2cf10414 100644 --- a/gke-policies-v2/policy/node_pool_autoscaling.rego +++ b/gke-policies-v2/policy/node_pool_autoscaling.rego @@ -30,13 +30,14 @@ package gke.policy.node_pool_autoscaling -default valid = false +default valid := false valid { count(violation) == 0 } violation[msg] { + some pool not input.data.gke.node_pools[pool].autoscaling.enabled msg := sprintf("Node pool %q does not have autoscaling configured.", [input.data.gke.node_pools[pool].name]) -} \ No newline at end of file +} diff --git a/gke-policies-v2/policy/node_pool_autoupgrade.rego b/gke-policies-v2/policy/node_pool_autoupgrade.rego index 6f6d0046..906e63f6 100644 --- a/gke-policies-v2/policy/node_pool_autoupgrade.rego +++ b/gke-policies-v2/policy/node_pool_autoupgrade.rego @@ -32,14 +32,14 @@ package gke.policy.node_pool_autoupgrade -default valid = false +default valid := false valid { 
count(violation) == 0 } violation[msg] { + some pool not input.data.gke.node_pools[pool].management.auto_upgrade msg := sprintf("autoupgrade not set for GKE node pool %q", [input.data.gke.node_pools[pool].name]) -} - +} diff --git a/gke-policies-v2/policy/node_pool_forbid_default_sa.rego b/gke-policies-v2/policy/node_pool_forbid_default_sa.rego index e7d6af66..86ce02c4 100644 --- a/gke-policies-v2/policy/node_pool_forbid_default_sa.rego +++ b/gke-policies-v2/policy/node_pool_forbid_default_sa.rego @@ -32,7 +32,7 @@ package gke.policy.node_pool_forbid_default_sa -default valid = false +default valid := false valid { count(violation) == 0 @@ -40,6 +40,7 @@ valid { violation[msg] { not input.data.gke.autopilot.enabled + some pool input.data.gke.node_pools[pool].config.service_account == "default" msg := sprintf("GKE cluster node_pool %q should have a dedicated SA", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/node_pool_integrity_monitoring.rego b/gke-policies-v2/policy/node_pool_integrity_monitoring.rego index 523f6bba..9fc0b402 100644 --- a/gke-policies-v2/policy/node_pool_integrity_monitoring.rego +++ b/gke-policies-v2/policy/node_pool_integrity_monitoring.rego @@ -31,13 +31,14 @@ package gke.policy.node_pool_integrity_monitoring -default valid = false +default valid := false valid { count(violation) == 0 } -violation[msg] { +violation[msg] { + some pool not input.data.gke.node_pools[pool].config.shielded_instance_config.enable_integrity_monitoring msg := sprintf("Node pool %q has disabled integrity monitoring feature.", [input.data.gke.node_pools[pool].name]) -} +} diff --git a/gke-policies-v2/policy/node_pool_multi_zone.rego b/gke-policies-v2/policy/node_pool_multi_zone.rego index e1e5190d..e998b0e9 100644 --- a/gke-policies-v2/policy/node_pool_multi_zone.rego +++ b/gke-policies-v2/policy/node_pool_multi_zone.rego @@ -29,13 +29,14 @@ package gke.policy.node_pool_multi_zone -default valid = false +default valid := false valid { 
count(violation) == 0 } -violation[msg] { +violation[msg] { + some pool count(input.data.gke.node_pools[pool].locations) < 2 msg := sprintf("Node pool %q is not on multiple zones.", [input.data.gke.node_pools[pool].name]) -} \ No newline at end of file +} \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_secure_boot.rego b/gke-policies-v2/policy/node_pool_secure_boot.rego index b7b210cf..b13a66d1 100644 --- a/gke-policies-v2/policy/node_pool_secure_boot.rego +++ b/gke-policies-v2/policy/node_pool_secure_boot.rego @@ -31,13 +31,14 @@ package gke.policy.node_pool_secure_boot -default valid = false +default valid := false valid { count(violation) == 0 } -violation[msg] { +violation[msg] { + some pool not input.data.gke.node_pools[pool].config.shielded_instance_config.enable_secure_boot msg := sprintf("Node pool %q has disabled secure boot.", [input.data.gke.node_pools[pool].name]) -} +} diff --git a/gke-policies-v2/policy/node_pool_use_cos.rego b/gke-policies-v2/policy/node_pool_use_cos.rego index 0efcb9eb..338d01a8 100644 --- a/gke-policies-v2/policy/node_pool_use_cos.rego +++ b/gke-policies-v2/policy/node_pool_use_cos.rego @@ -35,13 +35,14 @@ package gke.policy.node_pool_use_cos import future.keywords.in -default valid = false +default valid := false valid { count(violation) == 0 } -violation[msg] { +violation[msg] { + some pool not lower(input.data.gke.node_pools[pool].config.image_type) in {"cos", "cos_containerd"} msg := sprintf("Node pool %q does not use Container-Optimized OS.", [input.data.gke.node_pools[pool].name]) -} \ No newline at end of file +} diff --git a/gke-policies-v2/policy/node_pool_version_skew.rego b/gke-policies-v2/policy/node_pool_version_skew.rego index fdd7ef3d..e97264d0 100644 --- a/gke-policies-v2/policy/node_pool_version_skew.rego +++ b/gke-policies-v2/policy/node_pool_version_skew.rego @@ -31,7 +31,7 @@ package gke.policy.node_pool_version_skew -default valid = false +default valid := false expr := 
`^([0-9]+)\.([0-9]+)\.([0-9]+)(-.+)*$` diff --git a/gke-policies-v2/policy/node_rbac_security_group.rego b/gke-policies-v2/policy/node_rbac_security_group.rego index 9699d5de..fff28616 100644 --- a/gke-policies-v2/policy/node_rbac_security_group.rego +++ b/gke-policies-v2/policy/node_rbac_security_group.rego @@ -34,13 +34,13 @@ package gke.policy.rbac_security_group_enabled -default valid = false +default valid := false valid { count(violation) == 0 } -violation[msg] { +violation[msg] { not input.data.gke.authenticator_groups_config.enabled msg := sprintf("RBAC security group not enabled for cluster %q", [input.data.gke.name]) } \ No newline at end of file diff --git a/gke-policies-v2/policy/private_cluster.rego b/gke-policies-v2/policy/private_cluster.rego index 2c59ec9f..050c1dd1 100644 --- a/gke-policies-v2/policy/private_cluster.rego +++ b/gke-policies-v2/policy/private_cluster.rego @@ -30,7 +30,7 @@ package gke.policy.private_cluster -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/secret_encryption.rego b/gke-policies-v2/policy/secret_encryption.rego index 7183cf69..2fb0e410 100644 --- a/gke-policies-v2/policy/secret_encryption.rego +++ b/gke-policies-v2/policy/secret_encryption.rego @@ -33,7 +33,7 @@ package gke.policy.secret_encryption -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/shielded_nodes.rego b/gke-policies-v2/policy/shielded_nodes.rego index ab232356..c0a484a5 100644 --- a/gke-policies-v2/policy/shielded_nodes.rego +++ b/gke-policies-v2/policy/shielded_nodes.rego @@ -31,7 +31,7 @@ package gke.policy.shielded_nodes -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/policy/vpc_native_cluster.rego b/gke-policies-v2/policy/vpc_native_cluster.rego index 59cad735..4023f6df 100644 --- a/gke-policies-v2/policy/vpc_native_cluster.rego +++ 
b/gke-policies-v2/policy/vpc_native_cluster.rego @@ -28,13 +28,14 @@ package gke.policy.vpc_native_cluster -default valid = false +default valid := false valid { count(violation) == 0 } violation[msg] { + some pool not input.data.gke.node_pools[pool].network_config.pod_ipv4_cidr_block msg := sprintf("Nodepool %q of the GKE cluster is not configured to use VPC-native routing", [input.data.gke.node_pools[pool].name]) } diff --git a/gke-policies-v2/policy/workload_identity.rego b/gke-policies-v2/policy/workload_identity.rego index bc49ef9b..b2440a5a 100644 --- a/gke-policies-v2/policy/workload_identity.rego +++ b/gke-policies-v2/policy/workload_identity.rego @@ -33,7 +33,7 @@ package gke.policy.workload_identity -default valid = false +default valid := false valid { count(violation) == 0 diff --git a/gke-policies-v2/rule/cluster/location.rego b/gke-policies-v2/rule/cluster/location.rego index 7f5557e6..f3d80dad 100644 --- a/gke-policies-v2/rule/cluster/location.rego +++ b/gke-policies-v2/rule/cluster/location.rego @@ -15,9 +15,9 @@ package gke.rule.cluster.location regional(location) { - regex.match("^[^-]+-[^-]+$", location) + regex.match(`^[^-]+-[^-]+$`, location) } zonal(location) { - regex.match("^[^-]+-[^-]+-[^-]+$", location) + regex.match(`^[^-]+-[^-]+-[^-]+$`, location) } diff --git a/gke-policies-v2/scalability/limit_namespaces.rego b/gke-policies-v2/scalability/limit_namespaces.rego index b5446bc6..962b00f2 100644 --- a/gke-policies-v2/scalability/limit_namespaces.rego +++ b/gke-policies-v2/scalability/limit_namespaces.rego @@ -27,16 +27,16 @@ package gke.scalability.namespaces -default valid = false -default limit = 10000 -default threshold = 80 +default valid := false +default limit := 10000 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = round(limit * threshold * 0.01) + warn_limit := round(limit * threshold * 0.01) input.data.monitoring.namespaces.scalar > warn_limit msg := sprintf("Total number of namespaces %d 
has reached warning level %d (limit is %d)", [input.data.monitoring.namespaces.scalar, warn_limit, limit]) } diff --git a/gke-policies-v2/scalability/limit_secrets_encryption.rego b/gke-policies-v2/scalability/limit_secrets_encryption.rego index 744cc9d9..dda17c96 100644 --- a/gke-policies-v2/scalability/limit_secrets_encryption.rego +++ b/gke-policies-v2/scalability/limit_secrets_encryption.rego @@ -27,16 +27,16 @@ package gke.scalability.secrets_with_enc -default valid = false -default limit = 30000 -default threshold = 80 +default valid := false +default limit := 30000 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = round(limit * threshold * 0.01) + warn_limit := round(limit * threshold * 0.01) secrets_cnt := input.data.monitoring.secrets.scalar input.data.gke.database_encryption.state == 1 secrets_cnt> warn_limit diff --git a/gke-policies-v2/scalability/limit_services.rego b/gke-policies-v2/scalability/limit_services.rego index 76f3aa14..08d59c2b 100644 --- a/gke-policies-v2/scalability/limit_services.rego +++ b/gke-policies-v2/scalability/limit_services.rego @@ -28,16 +28,16 @@ package gke.scalability.services -default valid = false -default limit = 10000 -default threshold = 80 +default valid := false +default limit := 10000 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = round(limit * threshold * 0.01) + warn_limit := round(limit * threshold * 0.01) input.data.monitoring.services.scalar > warn_limit msg := sprintf("Total number of services %d has reached warning level %d (limit is %d)", [input.data.monitoring.services.scalar, warn_limit, limit]) } diff --git a/gke-policies-v2/scalability/limit_services_per_ns.rego b/gke-policies-v2/scalability/limit_services_per_ns.rego index f7374dc9..fac6e461 100644 --- a/gke-policies-v2/scalability/limit_services_per_ns.rego +++ b/gke-policies-v2/scalability/limit_services_per_ns.rego @@ -28,16 +28,16 @@ package 
gke.scalability.services_per_ns -default valid = false -default limit = 5000 -default threshold = 80 +default valid := false +default limit := 5000 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = round(limit * threshold * 0.01) + warn_limit := round(limit * threshold * 0.01) some namespace srv_cnt := input.data.monitoring.services_per_ns.vector[namespace] srv_cnt > warn_limit diff --git a/gke-policies-v2/scalability/limits_containers.rego b/gke-policies-v2/scalability/limits_containers.rego index 442cbeae..35173312 100644 --- a/gke-policies-v2/scalability/limits_containers.rego +++ b/gke-policies-v2/scalability/limits_containers.rego @@ -27,24 +27,24 @@ package gke.scalability.containers -default valid = false -default limit_standard = 400000 -default limit_autopilot = 24000 -default threshold = 80 +default valid := false +default limit_standard := 400000 +default limit_autopilot := 24000 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = round(limit_standard * threshold * 0.01) + warn_limit := round(limit_standard * threshold * 0.01) not input.data.gke.autopilot.enabled input.data.monitoring.containers.scalar > warn_limit msg := sprintf("Total number of containers %d has reached warning level %d (limit is %d for standard clusters)", [input.data.monitoring.containers.scalar, warn_limit, limit_standard]) } violation[msg] { - warn_limit = round(limit_autopilot * threshold * 0.01) + warn_limit := round(limit_autopilot * threshold * 0.01) input.data.gke.autopilot.enabled input.data.monitoring.containers.scalar > warn_limit msg := sprintf("Total number of containers %d has reached warning level %d (limit is %d for autopilot clusters)", [input.data.monitoring.containers.scalar, warn_limit, limit_autopilot]) diff --git a/gke-policies-v2/scalability/limits_hpas.rego b/gke-policies-v2/scalability/limits_hpas.rego index 0d556ef1..c29b00ad 100644 --- a/gke-policies-v2/scalability/limits_hpas.rego 
+++ b/gke-policies-v2/scalability/limits_hpas.rego @@ -27,17 +27,17 @@ package gke.scalability.hpas -default valid = false -default limit = 300 -default threshold = 80 +default valid := false +default limit := 300 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = round(limit * threshold * 0.01) - hpas := input.data.monitoring.hpas.scalar + warn_limit := round(limit * threshold * 0.01) + hpas := input.data.monitoring.hpas.scalar hpas > warn_limit msg := sprintf("Total number of HPAs %d has reached warning level %d (limit is %d)", [hpas, warn_limit, limit]) } diff --git a/gke-policies-v2/scalability/limits_nodes.rego b/gke-policies-v2/scalability/limits_nodes.rego index e2eeca55..35124e75 100644 --- a/gke-policies-v2/scalability/limits_nodes.rego +++ b/gke-policies-v2/scalability/limits_nodes.rego @@ -29,38 +29,38 @@ package gke.scalability.nodes -default valid = false +default valid := false -default private_nodes_limit = 15000 -default public_nodes_limit = 5000 -default autopilot_nodes_limit = 1000 -default threshold = 80 +default private_nodes_limit := 15000 +default public_nodes_limit := 5000 +default autopilot_nodes_limit := 1000 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = round(private_nodes_limit * threshold * 0.01) - nodes := input.data.monitoring.nodes.scalar + warn_limit := round(private_nodes_limit * threshold * 0.01) + nodes := input.data.monitoring.nodes.scalar is_private := input.data.gke.private_cluster_config.enable_private_nodes - is_private = true + is_private = true nodes > warn_limit msg := sprintf("nodes found: %d higher than the limit for private clusters: %d", [nodes, warn_limit]) } violation[msg] { - warn_limit = round(public_nodes_limit * threshold * 0.01) - nodes := input.data.monitoring.nodes.scalar + warn_limit := round(public_nodes_limit * threshold * 0.01) + nodes := input.data.monitoring.nodes.scalar is_private := 
input.data.gke.private_cluster_config.enable_private_nodes - is_private = false + is_private = false nodes > warn_limit msg := sprintf("nodes found: %d higher than the limit for non private clusters: %d", [nodes, warn_limit]) } violation[msg] { - warn_limit = round(autopilot_nodes_limit * threshold * 0.01) - nodes := input.data.monitoring.nodes.scalar + warn_limit := round(autopilot_nodes_limit * threshold * 0.01) + nodes := input.data.monitoring.nodes.scalar input.data.gke.autopilot.enabled nodes > warn_limit msg := sprintf("nodes found: %d higher than the warn limit for autopilot clusters: %d", [nodes, warn_limit]) diff --git a/gke-policies-v2/scalability/limits_nodes_per_pool_zone.rego b/gke-policies-v2/scalability/limits_nodes_per_pool_zone.rego index d80ba9b8..452c8bb8 100644 --- a/gke-policies-v2/scalability/limits_nodes_per_pool_zone.rego +++ b/gke-policies-v2/scalability/limits_nodes_per_pool_zone.rego @@ -28,16 +28,16 @@ package gke.scalability.nodes_per_pool_zone -default valid = false -default limit = 1000 -default threshold = 80 +default valid := false +default limit := 1000 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = round(limit * threshold * 0.01) + warn_limit := round(limit * threshold * 0.01) some nodepool, zone not input.data.gke.autopilot.enabled nodes_cnt := input.data.monitoring.nodes_per_pool_zone.vector[nodepool][zone] diff --git a/gke-policies-v2/scalability/limits_pods.rego b/gke-policies-v2/scalability/limits_pods.rego index 1430d6d5..b1abbaec 100644 --- a/gke-policies-v2/scalability/limits_pods.rego +++ b/gke-policies-v2/scalability/limits_pods.rego @@ -27,24 +27,24 @@ package gke.scalability.pods -default valid = false -default limit_standard = 200000 -default limit_autopilot = 12000 -default threshold = 80 +default valid := false +default limit_standard := 200000 +default limit_autopilot := 12000 +default threshold := 80 valid { count(violation) == 0 } violation[msg] { - warn_limit = 
round(limit_standard * threshold * 0.01) + warn_limit := round(limit_standard * threshold * 0.01) not input.data.gke.autopilot.enabled input.data.monitoring.pods.scalar > warn_limit msg := sprintf("Total number of pods %d has reached warning level %d (limit is %d for standard clusters)", [input.data.monitoring.pods.scalar, warn_limit, limit_standard]) } violation[msg] { - warn_limit = round(limit_autopilot * threshold * 0.01) + warn_limit := round(limit_autopilot * threshold * 0.01) input.data.gke.autopilot input.data.gke.autopilot.enabled input.data.monitoring.pods.scalar > warn_limit diff --git a/gke-policies-v2/scalability/limits_pods_per_node.rego b/gke-policies-v2/scalability/limits_pods_per_node.rego index 1add98fe..9c0cb4ef 100644 --- a/gke-policies-v2/scalability/limits_pods_per_node.rego +++ b/gke-policies-v2/scalability/limits_pods_per_node.rego @@ -28,8 +28,8 @@ package gke.scalability.pods_per_node -default valid = false -default threshold = 80 +default valid := false +default threshold := 80 valid { count(violation) == 0