From 9034cc4e9fd84c32ed9944a9745d8b70c994a85c Mon Sep 17 00:00:00 2001
From: Yi Chen
Date: Fri, 2 Aug 2024 06:06:06 +0800
Subject: [PATCH] Cherry pick #2081 #2046 #2091 #2072 (#2108)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Update helm docs (#2081)

Signed-off-by: Carlos Sánchez Páez

(cherry picked from commit eca3fc8702a18bded7320f7835d47ebc33c7531f)

* Update the process to build api-docs, generate CRD manifests and code (#2046)

* Update .gitignore

Signed-off-by: Yi Chen

* Update .dockerignore

Signed-off-by: Yi Chen

* Update Makefile

Signed-off-by: Yi Chen

* Update the process to generate api docs

Signed-off-by: Yi Chen

* Update the workflow to generate api docs

Signed-off-by: Yi Chen

* Use controller-gen to generate CRD and deep copy related methods

Signed-off-by: Yi Chen

* Update helm chart CRDs

Signed-off-by: Yi Chen

* Update workflow for building spark operator

Signed-off-by: Yi Chen

* Update README.md

Signed-off-by: Yi Chen

---------

Signed-off-by: Yi Chen
(cherry picked from commit 779ea3debc7885f422a759e6cac6cbcd466c8e5c)

* Add topologySpreadConstraints (#2091)

* Update README and documentation (#2047)

* Update docs

Signed-off-by: Yi Chen

* Remove docs and update README

Signed-off-by: Yi Chen

* Add link to monthly community meeting

Signed-off-by: Yi Chen

---------

Signed-off-by: Yi Chen
Signed-off-by: jbhalodia-slack

* Add PodDisruptionBudget to chart (#2078)

* Add PodDisruptionBudget to chart

Signed-off-by: Carlos Sánchez Páez
Signed-off-by: Carlos Sánchez Páez
Signed-off-by: Carlos Sánchez Páez

* PR comments

Signed-off-by: Carlos Sánchez Páez

---------

Signed-off-by: Carlos Sánchez Páez
Signed-off-by: Carlos Sánchez Páez
Signed-off-by: jbhalodia-slack

* Set topologySpreadConstraints

Signed-off-by: jbhalodia-slack

* Update README and increase patch version

Signed-off-by: jbhalodia-slack

* Revert replicaCount change

Signed-off-by: jbhalodia-slack

* Update README after master merge

Signed-off-by: jbhalodia-slack

* Update README

Signed-off-by: jbhalodia-slack

---------

Signed-off-by: Yi Chen
Signed-off-by: jbhalodia-slack
Signed-off-by: Carlos Sánchez Páez
Signed-off-by: Carlos Sánchez Páez
Co-authored-by: Yi Chen
Co-authored-by: Carlos Sánchez Páez

(cherry picked from commit 4108f5493706f463cc2ceb823b66b5e6ac8fb1ca)

* Use controller-runtime to reconstruct spark operator (#2072)

* Use controller-runtime to reconstruct spark operator

Signed-off-by: Yi Chen

* Update helm charts

Signed-off-by: Yi Chen

* Update examples

Signed-off-by: Yi Chen

---------

Signed-off-by: Yi Chen
(cherry picked from commit 0dc641bd1d7d262e4a8c5564c926fc4716bcf722)

---------

Co-authored-by: Carlos Sánchez Páez
Co-authored-by: jbhalodia-slack
---
 .dockerignore | 32 +-
 .github/workflows/main.yaml | 18 +-
 .gitignore | 12 +-
 .golangci.yaml | 39 +
 .pre-commit-config.yaml | 1 +
 Dockerfile | 25 +-
 Makefile | 350 +-
 PROJECT | 47 +
 README.md | 2 +-
 .../v1beta1/defaults.go | 0
 .../v1beta1/doc.go | 0
 api/v1beta1/groupversion_info.go | 36 +
 .../v1beta1/register.go | 28 +-
 .../scheduledsparkapplication_types.go | 104 +
 .../v1beta1/sparkapplication_types.go | 305 +-
 .../v1beta1/zz_generated.deepcopy.go | 30 +-
 .../v1beta2/defaults.go | 10 +-
 .../v1beta2/defaults_test.go | 18 +-
 .../v1beta2/doc.go | 0
 api/v1beta2/groupversion_info.go | 36 +
 .../v1beta2/pod_webhook.go | 6 +-
 .../v1beta2/register.go | 28 +-
 .../scheduledsparkapplication_types.go | 125 +
 .../v1beta2/sparkapplication_types.go | 445 +-
 .../v1beta2/zz_generated.deepcopy.go | 162 +-
 charts/spark-operator-chart/Chart.yaml | 40 +-
 charts/spark-operator-chart/README.md | 163 +-
 charts/spark-operator-chart/README.md.gotmpl | 10 +-
 charts/spark-operator-chart/ci/ci-values.yaml | 2 +-
 ...tor.k8s.io_scheduledsparkapplications.yaml | 15140 ++++++++++++----
 ...parkoperator.k8s.io_sparkapplications.yaml | 15086 +++++++++++----
 .../templates/_helpers.tpl | 45 +-
 .../templates/controller/_helpers.tpl | 70 +
 .../templates/controller/deployment.yaml | 162 +
 .../controller/poddisruptionbudget.yaml | 34 +
 .../templates/controller/rbac.yaml | 201 +
 .../templates/controller/serviceaccount.yaml | 28 +
 .../templates/deployment.yaml | 140 -
 .../templates/poddisruptionbudget.yaml | 17 -
 .../templates/prometheus-podmonitor.yaml | 19 -
 .../templates/prometheus/_helpers.tpl | 22 +
 .../templates/prometheus/podmonitor.yaml | 44 +
 .../spark-operator-chart/templates/rbac.yaml | 148 -
 .../templates/serviceaccount.yaml | 12 -
 .../templates/spark-rbac.yaml | 39 -
 .../templates/spark-serviceaccount.yaml | 14 -
 .../templates/spark/_helpers.tpl | 47 +
 .../templates/spark/rbac.yaml | 73 +
 .../templates/spark/serviceaccount.yaml | 30 +
 .../templates/webhook/_helpers.tpl | 107 +-
 .../templates/webhook/deployment.yaml | 155 +
 .../webhook/mutatingwebhookconfiguration.yaml | 116 +
 .../webhook/poddisruptionbudget.yaml | 34 +
 .../templates/webhook/rbac.yaml | 171 +
 .../templates/webhook/secret.yaml | 13 -
 .../templates/webhook/service.yaml | 26 +-
 .../templates/webhook/serviceaccount.yaml | 28 +
 .../validatingwebhookconfiguration.yaml | 83 +
 .../tests/controller/deployment_test.yaml | 537 +
 .../controller/poddisruptionbudget_test.yaml | 68 +
 .../tests/controller/rbac_test.yaml | 79 +
 .../tests/controller/serviceaccount_test.yaml | 67 +
 .../tests/deployment_test.yaml | 301 -
 .../tests/poddisruptionbudget_test.yaml | 37 -
 .../tests/prometheus/podmonitor_test.yaml | 102 +
 .../spark-operator-chart/tests/rbac_test.yaml | 90 -
 .../tests/serviceaccount_test.yaml | 54 -
 .../tests/spark-rbac_test.yaml | 133 -
 .../tests/spark-serviceaccount_test.yaml | 112 -
 .../tests/spark/rbac_test.yaml | 123 +
 .../tests/spark/serviceaccount_test.yaml | 124 +
 .../tests/webhook/deployment_test.yaml | 504 +
 .../mutatingwebhookconfiguration_test.yaml | 78 +
 .../webhook/poddisruptionbudget_test.yaml | 68 +
 .../tests/webhook/secret_test.yaml | 31 -
 .../tests/webhook/service_test.yaml | 32 +-
 .../validatingwebhookconfiguration_test.yaml | 77 +
 charts/spark-operator-chart/values.yaml | 472 +-
 cmd/main.go | 31 +
 .../operator/controller/root.go | 22 +-
 cmd/operator/controller/start.go | 364 +
 cmd/operator/root.go | 39 +
 .../operator/version/root.go | 34 +-
 cmd/operator/webhook/root.go | 33 +
 cmd/operator/webhook/start.go | 410 +
 codecov.yaml | 10 +
 config/certmanager/certificate.yaml | 35 +
 config/certmanager/kustomization.yaml | 5 +
 config/certmanager/kustomizeconfig.yaml | 8 +
 ...tor.k8s.io_scheduledsparkapplications.yaml | 11614 ++++++++++++
 ...parkoperator.k8s.io_sparkapplications.yaml | 11548 ++++++++++++
 config/crd/kustomization.yaml | 26 +
 config/crd/kustomizeconfig.yaml | 19 +
 .../cainjection_in_sparkapplications.yaml | 7 +
 .../patches/webhook_in_sparkapplications.yaml | 16 +
 config/default/manager_webhook_patch.yaml | 23 +
 config/default/webhookcainjection_patch.yaml | 25 +
 config/rbac/role.yaml | 130 +
 ...scheduledsparkapplication_editor_role.yaml | 27 +
 ...scheduledsparkapplication_viewer_role.yaml | 23 +
 config/rbac/sparkapplication_editor_role.yaml | 27 +
 config/rbac/sparkapplication_viewer_role.yaml | 23 +
 config/samples/kustomization.yaml | 7 +
 .../v1beta1_scheduledsparkapplication.yaml | 9 +
 config/samples/v1beta1_sparkapplication.yaml | 23 +
 .../v1beta2_scheduledsparkapplication.yaml | 34 +
 config/samples/v1beta2_sparkapplication.yaml | 23 +
 config/webhook/kustomization.yaml | 6 +
 config/webhook/kustomizeconfig.yaml | 22 +
 config/webhook/manifests.yaml | 119 +
 config/webhook/service.yaml | 15 +
 docs/api-docs.md | 1943 +-
 entrypoint.sh | 13 +-
 examples/spark-operator-with-metrics.yaml | 53 -
 examples/spark-operator-with-webhook.yaml | 94 -
 examples/spark-pi-configmap.yaml | 38 +-
 examples/spark-pi-custom-resource.yaml | 42 +-
 examples/spark-pi-dynamic-allocation.yaml | 49 +
 examples/spark-pi-prometheus.yaml | 18 +-
 ...{spark-py-pi.yaml => spark-pi-python.yaml} | 29 +-
 ...-schedule.yaml => spark-pi-scheduled.yaml} | 25 +-
 examples/spark-pi-volcano.yaml | 43 +
 examples/spark-pi.yaml | 39 +-
 go.mod | 237 +-
 go.sum | 582 +-
 hack/api-docs/Dockerfile | 18 -
 hack/api-docs/api-docs-config.json | 28 -
 hack/api-docs/config.json | 28 +
 .../members.tpl | 0
 .../{api-docs-template => template}/pkg.tpl | 2 +-
 .../placeholder.go | 0
 .../{api-docs-template => template}/type.tpl | 0
 hack/boilerplate.go.txt | 15 +
 {pkg => internal}/controller/doc.go | 4 +-
 .../controller.go | 99 +
 .../event_filter.go | 56 +
 .../event_handler.go | 102 +
 .../scheduledsparkapplication/controller.go | 377 +
 .../controller_test.go | 90 +
 .../scheduledsparkapplication/event_filter.go | 81 +
 .../event_handler.go | 85 +
 .../scheduledsparkapplication/suite_test.go | 94 +
 .../controller/sparkapplication/controller.go | 1217 ++
 .../sparkapplication/controller_test.go | 290 +
 .../sparkapplication/driveringress.go | 158 +-
 .../sparkapplication/driveringress_test.go | 713 +
 .../sparkapplication/event_filter.go | 207 +
 .../sparkapplication/event_handler.go | 220 +
 .../sparkapplication/monitoring_config.go | 89 +-
 .../monitoring_config_test.go | 255 +
 .../controller/sparkapplication/submission.go | 1023 ++
 .../sparkapplication/submission_test.go | 696 +
 .../controller/sparkapplication/suite_test.go | 94 +
 .../controller/sparkapplication/validator.go | 44 +
 .../controller/sparkapplication/web_ui.go | 92 +
 .../sparkapplication/web_ui_test.go | 655 +
 .../controller.go | 100 +
 .../event_filter.go | 56 +
 .../event_handler.go | 102 +
 internal/metrics/metrcis.go | 23 +
 internal/metrics/sparkapplication_metrics.go | 386 +
 internal/metrics/sparkpod_metrics.go | 191 +
 internal/scheduler/registry.go | 75 +
 internal/scheduler/scheduler.go | 41 +
 internal/scheduler/volcano/scheduler.go | 229 +
 .../scheduler/volcano/scheduler_test.go | 44 +-
 .../scheduler/volcano/util.go | 19 +-
 {pkg => internal}/webhook/doc.go | 0
 internal/webhook/resourcequota.go | 259 +
 internal/webhook/resourcequota_test.go | 41 +
 .../scheduledsparkapplication_defaulter.go | 49 +
 .../scheduledsparkapplication_validator.go | 80 +
 .../webhook/sparkapplication_defaulter.go | 125 +
 .../webhook/sparkapplication_validator.go | 173 +
 internal/webhook/sparkpod_defaulter.go | 732 +
 .../webhook/sparkpod_defaulter_test.go | 518 +-
 internal/webhook/suite_test.go | 150 +
 internal/webhook/webhook.go | 37 +
 main.go | 312 -
 manifest/crds/kustomization.yaml | 21 -
 ...tor.k8s.io_scheduledsparkapplications.yaml | 4496 -----
 ...parkoperator.k8s.io_sparkapplications.yaml | 4506 -----
 .../spark-application-rbac/kustomization.yaml | 23 -
 .../spark-application-rbac.yaml | 52 -
 .../spark-operator-install/kustomization.yaml | 25 -
 .../spark-operator-rbac.yaml | 97 -
 .../spark-operator.yaml | 45 -
 .../kustomization.yaml | 27 -
 .../spark-operator-patch.yaml | 40 -
 .../spark-operator-webhook.yaml | 53 -
 .../spark-operator-with-webhook.yaml | 96 -
 pkg/batchscheduler/scheduler_manager.go | 80 -
 .../volcano/volcano_scheduler.go | 307 -
 pkg/certificate/certificate.go | 307 +
 pkg/certificate/certificate_test.go | 175 +
 .../register.go => certificate/doc.go} | 8 +-
 pkg/certificate/suite_test.go | 94 +
 pkg/{util/cert.go => certificate/util.go} | 24 +-
 pkg/certificate/util_test.go | 58 +
 .../clientset/versioned/fake/register.go | 4 +-
 .../clientset/versioned/scheme/register.go | 4 +-
 .../fake/fake_scheduledsparkapplication.go | 2 +-
 .../v1beta1/fake/fake_sparkapplication.go | 2 +-
 .../v1beta1/scheduledsparkapplication.go | 2 +-
 .../v1beta1/sparkapplication.go | 2 +-
 .../v1beta1/sparkoperator.k8s.io_client.go | 4 +-
 .../fake/fake_scheduledsparkapplication.go | 2 +-
 .../v1beta2/fake/fake_sparkapplication.go | 2 +-
 .../v1beta2/scheduledsparkapplication.go | 2 +-
 .../v1beta2/sparkapplication.go | 2 +-
 .../v1beta2/sparkoperator.k8s.io_client.go | 2 +-
 .../informers/externalversions/generic.go | 12 +-
 .../v1beta1/scheduledsparkapplication.go | 2 +-
 .../v1beta1/sparkapplication.go | 2 +-
 .../v1beta2/scheduledsparkapplication.go | 2 +-
 .../v1beta2/sparkapplication.go | 2 +-
 .../v1beta1/scheduledsparkapplication.go | 2 +-
 .../v1beta1/sparkapplication.go | 2 +-
 .../v1beta2/scheduledsparkapplication.go | 2 +-
 .../v1beta2/sparkapplication.go | 2 +-
 pkg/common/constants.go | 50 +
 pkg/{config => common}/doc.go | 4 +-
 pkg/common/event.go | 58 +
 pkg/common/metrics.go | 49 +
 pkg/common/prometheus.go | 139 +
 pkg/common/spark.go | 370 +
 pkg/common/volcano.go | 23 +
 pkg/config/config.go | 58 -
 pkg/config/config_test.go | 74 -
 pkg/config/constants.go | 317 -
 pkg/config/secret.go | 74 -
 pkg/config/secret_test.go | 107 -
 .../scheduledsparkapplication/controller.go | 425 -
 .../controller_test.go | 552 -
 pkg/controller/sparkapplication/controller.go | 1130 --
 .../sparkapplication/controller_test.go | 1674 --
 .../sparkapplication/driveringress_test.go | 730 -
 .../monitoring_config_test.go | 267 -
 .../spark_pod_eventhandler.go | 97 -
 .../spark_pod_eventhandler_test.go | 288 -
 .../sparkapplication/sparkapp_metrics.go | 336 -
 .../sparkapplication/sparkapp_metrics_test.go | 74 -
 .../sparkapplication/sparkapp_util.go | 218 -
 .../sparkapplication/sparkapp_util_test.go | 59 -
 pkg/controller/sparkapplication/sparkui.go | 96 -
 .../sparkapplication/sparkui_test.go | 673 -
 pkg/controller/sparkapplication/submission.go | 532 -
 .../sparkapplication/submission_test.go | 695 -
 pkg/util/capabilities.go | 25 +-
 pkg/util/cert_test.go | 39 -
 pkg/util/histogram_buckets.go | 43 -
 pkg/util/metrics.go | 191 -
 pkg/util/metrics_test.go | 68 -
 .../scheme.go => util/resourcequota.go} | 33 +-
 pkg/util/sparkapplication.go | 430 +
 pkg/util/sparkapplication_test.go | 330 +
 pkg/util/sparkpod.go | 48 +
 pkg/util/sparkpod_test.go | 301 +
 pkg/util/suite_test.go | 37 +
 pkg/util/util.go | 76 +-
 pkg/util/util_test.go | 131 +
 pkg/webhook/certs.go | 170 -
 pkg/webhook/certs_test.go | 118 -
 pkg/webhook/patch.go | 856 -
 pkg/webhook/resourceusage/enforcer.go | 95 -
 pkg/webhook/resourceusage/handlers.go | 119 -
 pkg/webhook/resourceusage/util.go | 241 -
 pkg/webhook/resourceusage/util_test.go | 25 -
 pkg/webhook/resourceusage/watcher.go | 157 -
 pkg/webhook/webhook.go | 657 -
 pkg/webhook/webhook_test.go | 310 -
 sparkctl/README.md | 92 +-
 sparkctl/build.sh | 18 +-
 sparkctl/cmd/client.go | 2 +-
 sparkctl/cmd/create.go | 21 +-
 sparkctl/cmd/create_test.go | 24 +-
 sparkctl/cmd/delete.go | 3 +-
 sparkctl/cmd/event.go | 5 +-
 sparkctl/cmd/forward.go | 6 +-
 sparkctl/cmd/gcs.go | 8 +-
 sparkctl/cmd/list.go | 5 +-
 sparkctl/cmd/log.go | 22 +-
 sparkctl/cmd/s3.go | 2 +-
 sparkctl/cmd/status.go | 4 +-
 test/e2e/README.md | 33 -
 test/e2e/basic_test.go | 118 -
 test/e2e/framework/cluster_role.go | 101 -
 test/e2e/framework/cluster_role_binding.go | 103 -
 test/e2e/framework/config_map.go | 62 -
 test/e2e/framework/context.go | 75 -
 test/e2e/framework/deployment.go | 86 -
 test/e2e/framework/framework.go | 216 -
 test/e2e/framework/helpers.go | 144 -
 test/e2e/framework/job.go | 106 -
 test/e2e/framework/namespace.go | 64 -
 test/e2e/framework/operator.go | 47 -
 test/e2e/framework/role.go | 100 -
 test/e2e/framework/role_binding.go | 108 -
 test/e2e/framework/service.go | 122 -
 test/e2e/framework/service_account.go | 89 -
 test/e2e/framework/sparkapplication.go | 75 -
 test/e2e/lifecycle_test.go | 103 -
 test/e2e/main_test.go | 84 -
 test/e2e/sparkapplication_test.go | 267 +
 test/e2e/suit_test.go | 159 +
 test/e2e/volume_mount_test.go | 92 -
 version.go | 90 +
 307 files changed, 66667 insertions(+), 36053 deletions(-)

 create mode 100644 .golangci.yaml
 create mode 100644 PROJECT
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta1/defaults.go (100%)
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta1/doc.go (100%)
 create mode 100644 api/v1beta1/groupversion_info.go
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta1/register.go (53%)
 create mode 100644 api/v1beta1/scheduledsparkapplication_types.go
 rename pkg/apis/sparkoperator.k8s.io/v1beta1/types.go => api/v1beta1/sparkapplication_types.go (83%)
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta1/zz_generated.deepcopy.go (98%)
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/defaults.go (92%)
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/defaults_test.go (92%)
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/doc.go (100%)
 create mode 100644 api/v1beta2/groupversion_info.go
 rename hack/custom-boilerplate.go.txt => api/v1beta2/pod_webhook.go (87%)
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/register.go (53%)
 create mode 100644 api/v1beta2/scheduledsparkapplication_types.go
 rename pkg/apis/sparkoperator.k8s.io/v1beta2/types.go => api/v1beta2/sparkapplication_types.go (75%)
 rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/zz_generated.deepcopy.go (97%)
 create mode 100644 charts/spark-operator-chart/templates/controller/_helpers.tpl
 create mode 100644 charts/spark-operator-chart/templates/controller/deployment.yaml
 create mode 100644 charts/spark-operator-chart/templates/controller/poddisruptionbudget.yaml
 create mode 100644 charts/spark-operator-chart/templates/controller/rbac.yaml
 create mode 100644 charts/spark-operator-chart/templates/controller/serviceaccount.yaml
 delete mode 100644 charts/spark-operator-chart/templates/deployment.yaml
 delete mode 100644 charts/spark-operator-chart/templates/poddisruptionbudget.yaml
 delete mode 100644 charts/spark-operator-chart/templates/prometheus-podmonitor.yaml
 create mode 100644 charts/spark-operator-chart/templates/prometheus/_helpers.tpl
 create mode 100644 charts/spark-operator-chart/templates/prometheus/podmonitor.yaml
 delete mode 100644 charts/spark-operator-chart/templates/rbac.yaml
 delete mode 100644 charts/spark-operator-chart/templates/serviceaccount.yaml
 delete mode 100644 charts/spark-operator-chart/templates/spark-rbac.yaml
 delete mode 100644 charts/spark-operator-chart/templates/spark-serviceaccount.yaml
 create mode 100644 charts/spark-operator-chart/templates/spark/_helpers.tpl
 create mode 100644 charts/spark-operator-chart/templates/spark/rbac.yaml
 create mode 100644 charts/spark-operator-chart/templates/spark/serviceaccount.yaml
 create mode 100644 charts/spark-operator-chart/templates/webhook/deployment.yaml
 create mode 100644 charts/spark-operator-chart/templates/webhook/mutatingwebhookconfiguration.yaml
 create mode 100644 charts/spark-operator-chart/templates/webhook/poddisruptionbudget.yaml
 create mode 100644 charts/spark-operator-chart/templates/webhook/rbac.yaml
 delete mode 100644 charts/spark-operator-chart/templates/webhook/secret.yaml
 create mode 100644 charts/spark-operator-chart/templates/webhook/serviceaccount.yaml
 create mode 100644 charts/spark-operator-chart/templates/webhook/validatingwebhookconfiguration.yaml
 create mode 100644 charts/spark-operator-chart/tests/controller/deployment_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/controller/poddisruptionbudget_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/controller/rbac_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/controller/serviceaccount_test.yaml
 delete mode 100644 charts/spark-operator-chart/tests/deployment_test.yaml
 delete mode 100644 charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/prometheus/podmonitor_test.yaml
 delete mode 100644 charts/spark-operator-chart/tests/rbac_test.yaml
 delete mode 100644 charts/spark-operator-chart/tests/serviceaccount_test.yaml
 delete mode 100644 charts/spark-operator-chart/tests/spark-rbac_test.yaml
 delete mode 100644 charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/spark/rbac_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/webhook/deployment_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/webhook/mutatingwebhookconfiguration_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/webhook/poddisruptionbudget_test.yaml
 delete mode 100644 charts/spark-operator-chart/tests/webhook/secret_test.yaml
 create mode 100644 charts/spark-operator-chart/tests/webhook/validatingwebhookconfiguration_test.yaml
 create mode 100644 cmd/main.go
 rename pkg/batchscheduler/interface/interface.go => cmd/operator/controller/root.go (60%)
 create mode 100644 cmd/operator/controller/start.go
 create mode 100644 cmd/operator/root.go
 rename pkg/controller/scheduledsparkapplication/controller_util.go => cmd/operator/version/root.go (53%)
 create mode 100644 cmd/operator/webhook/root.go
 create mode 100644 cmd/operator/webhook/start.go
 create mode 100644 codecov.yaml
 create mode 100644 config/certmanager/certificate.yaml
 create mode 100644 config/certmanager/kustomization.yaml
 create mode 100644 config/certmanager/kustomizeconfig.yaml
 create mode 100644 config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml
 create mode 100644 config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml
 create mode 100644 config/crd/kustomization.yaml
 create mode 100644 config/crd/kustomizeconfig.yaml
 create mode 100644 config/crd/patches/cainjection_in_sparkapplications.yaml
 create mode 100644 config/crd/patches/webhook_in_sparkapplications.yaml
 create mode 100644 config/default/manager_webhook_patch.yaml
 create mode 100644 config/default/webhookcainjection_patch.yaml
 create mode 100644 config/rbac/role.yaml
 create mode 100644 config/rbac/scheduledsparkapplication_editor_role.yaml
 create mode 100644 config/rbac/scheduledsparkapplication_viewer_role.yaml
 create mode 100644 config/rbac/sparkapplication_editor_role.yaml
 create mode 100644 config/rbac/sparkapplication_viewer_role.yaml
 create mode 100644 config/samples/kustomization.yaml
 create mode 100644 config/samples/v1beta1_scheduledsparkapplication.yaml
 create mode 100644 config/samples/v1beta1_sparkapplication.yaml
 create mode 100644 config/samples/v1beta2_scheduledsparkapplication.yaml
 create mode 100644 config/samples/v1beta2_sparkapplication.yaml
 create mode 100644 config/webhook/kustomization.yaml
 create mode 100644 config/webhook/kustomizeconfig.yaml
 create mode 100644 config/webhook/manifests.yaml
 create mode 100644 config/webhook/service.yaml
 delete mode 100644 examples/spark-operator-with-metrics.yaml
 delete mode 100644 examples/spark-operator-with-webhook.yaml
 create mode 100644 examples/spark-pi-dynamic-allocation.yaml
 rename examples/{spark-py-pi.yaml => spark-pi-python.yaml} (72%)
 rename examples/{spark-pi-schedule.yaml => spark-pi-scheduled.yaml} (76%)
 create mode 100644 examples/spark-pi-volcano.yaml
 delete mode 100644 hack/api-docs/Dockerfile
 delete mode 100644 hack/api-docs/api-docs-config.json
 create mode 100644 hack/api-docs/config.json
 rename hack/api-docs/{api-docs-template => template}/members.tpl (100%)
 rename hack/api-docs/{api-docs-template => template}/pkg.tpl (85%)
 rename hack/api-docs/{api-docs-template => template}/placeholder.go (100%)
 rename hack/api-docs/{api-docs-template => template}/type.tpl (100%)
 create mode 100644 hack/boilerplate.go.txt
 rename {pkg => internal}/controller/doc.go (95%)
 create mode 100644 internal/controller/mutatingwebhookconfiguration/controller.go
 create mode 100644 internal/controller/mutatingwebhookconfiguration/event_filter.go
 create mode 100644 internal/controller/mutatingwebhookconfiguration/event_handler.go
 create mode 100644 internal/controller/scheduledsparkapplication/controller.go
 create mode 100644 internal/controller/scheduledsparkapplication/controller_test.go
 create mode 100644 internal/controller/scheduledsparkapplication/event_filter.go
 create mode 100644 internal/controller/scheduledsparkapplication/event_handler.go
 create mode 100644 internal/controller/scheduledsparkapplication/suite_test.go
 create mode 100644 internal/controller/sparkapplication/controller.go
 create mode 100644 internal/controller/sparkapplication/controller_test.go
 rename {pkg => internal}/controller/sparkapplication/driveringress.go (65%)
 create mode 100644 internal/controller/sparkapplication/driveringress_test.go
 create mode 100644 internal/controller/sparkapplication/event_filter.go
 create mode 100644 internal/controller/sparkapplication/event_handler.go
 rename {pkg => internal}/controller/sparkapplication/monitoring_config.go (55%)
 create mode 100644 internal/controller/sparkapplication/monitoring_config_test.go
 create mode 100644 internal/controller/sparkapplication/submission.go
 create mode 100644 internal/controller/sparkapplication/submission_test.go
 create mode 100644 internal/controller/sparkapplication/suite_test.go
 create mode 100644 internal/controller/sparkapplication/validator.go
 create mode 100644 internal/controller/sparkapplication/web_ui.go
 create mode 100644 internal/controller/sparkapplication/web_ui_test.go
 create mode 100644 internal/controller/validatingwebhookconfiguration/controller.go
 create mode 100644 internal/controller/validatingwebhookconfiguration/event_filter.go
 create mode 100644 internal/controller/validatingwebhookconfiguration/event_handler.go
 create mode 100644 internal/metrics/metrcis.go
 create mode 100644 internal/metrics/sparkapplication_metrics.go
 create mode 100644 internal/metrics/sparkpod_metrics.go
 create mode 100644 internal/scheduler/registry.go
 create mode 100644 internal/scheduler/scheduler.go
 create mode 100644 internal/scheduler/volcano/scheduler.go
 rename pkg/batchscheduler/volcano/volcano_scheduler_test.go => internal/scheduler/volcano/scheduler_test.go (80%)
 rename pkg/util/array_flag.go => internal/scheduler/volcano/util.go (70%)
 rename {pkg => internal}/webhook/doc.go (100%)
 create mode 100644 internal/webhook/resourcequota.go
 create mode 100644 internal/webhook/resourcequota_test.go
 create mode 100644 internal/webhook/scheduledsparkapplication_defaulter.go
 create mode 100644 internal/webhook/scheduledsparkapplication_validator.go
 create mode 100644 internal/webhook/sparkapplication_defaulter.go
 create mode 100644 internal/webhook/sparkapplication_validator.go
 create mode 100644 internal/webhook/sparkpod_defaulter.go
 rename pkg/webhook/patch_test.go => internal/webhook/sparkpod_defaulter_test.go (76%)
 create mode 100644 internal/webhook/suite_test.go
 create mode 100644 internal/webhook/webhook.go
 delete mode 100644 main.go
 delete mode 100644 manifest/crds/kustomization.yaml
 delete mode 100644 manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml
 delete mode 100644 manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml
 delete mode 100644 manifest/spark-application-rbac/kustomization.yaml
 delete mode 100644 manifest/spark-application-rbac/spark-application-rbac.yaml
 delete mode 100644 manifest/spark-operator-install/kustomization.yaml
 delete mode 100644 manifest/spark-operator-install/spark-operator-rbac.yaml
 delete mode 100644 manifest/spark-operator-install/spark-operator.yaml
 delete mode 100644 manifest/spark-operator-with-webhook-install/kustomization.yaml
 delete mode 100644 manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml
 delete mode 100644 manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml
 delete mode 100644 manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml
 delete mode 100644 pkg/batchscheduler/scheduler_manager.go
 delete mode 100644 pkg/batchscheduler/volcano/volcano_scheduler.go
 create mode 100644 pkg/certificate/certificate.go
 create mode 100644 pkg/certificate/certificate_test.go
 rename pkg/{apis/sparkoperator.k8s.io/register.go => certificate/doc.go} (84%)
 create mode 100644 pkg/certificate/suite_test.go
 rename pkg/{util/cert.go => certificate/util.go} (72%)
 create mode 100644 pkg/certificate/util_test.go
 create mode 100644 pkg/common/constants.go
 rename pkg/{config => common}/doc.go (89%)
 create mode 100644 pkg/common/event.go
 create mode 100644 pkg/common/metrics.go
 create mode 100644 pkg/common/prometheus.go
 create mode 100644 pkg/common/spark.go
 create mode 100644 pkg/common/volcano.go
 delete mode 100644 pkg/config/config.go
 delete mode 100644 pkg/config/config_test.go
 delete mode 100644 pkg/config/constants.go
 delete mode 100644 pkg/config/secret.go
 delete mode 100644 pkg/config/secret_test.go
 delete mode 100644 pkg/controller/scheduledsparkapplication/controller.go
 delete mode 100644 pkg/controller/scheduledsparkapplication/controller_test.go
 delete mode 100644 pkg/controller/sparkapplication/controller.go
 delete mode 100644 pkg/controller/sparkapplication/controller_test.go
 delete mode 100644 pkg/controller/sparkapplication/driveringress_test.go
 delete mode 100644 pkg/controller/sparkapplication/monitoring_config_test.go
 delete mode 100644 pkg/controller/sparkapplication/spark_pod_eventhandler.go
 delete mode 100644 pkg/controller/sparkapplication/spark_pod_eventhandler_test.go
 delete mode 100644 pkg/controller/sparkapplication/sparkapp_metrics.go
 delete mode 100644 pkg/controller/sparkapplication/sparkapp_metrics_test.go
 delete mode 100644 pkg/controller/sparkapplication/sparkapp_util.go
 delete mode 100644 pkg/controller/sparkapplication/sparkapp_util_test.go
 delete mode 100644 pkg/controller/sparkapplication/sparkui.go
 delete mode 100644 pkg/controller/sparkapplication/sparkui_test.go
 delete mode 100644 pkg/controller/sparkapplication/submission.go
 delete mode 100644 pkg/controller/sparkapplication/submission_test.go
 delete mode 100644 pkg/util/cert_test.go
 delete mode 100644 pkg/util/histogram_buckets.go
 delete mode 100644 pkg/util/metrics_test.go
 rename pkg/{webhook/scheme.go => util/resourcequota.go} (57%)
 create mode 100644 pkg/util/sparkapplication.go
 create mode 100644 pkg/util/sparkapplication_test.go
 create mode 100644 pkg/util/sparkpod.go
 create mode 100644 pkg/util/sparkpod_test.go
 create mode 100644 pkg/util/suite_test.go
 create mode 100644 pkg/util/util_test.go
 delete mode 100644 pkg/webhook/certs.go
 delete mode 100644 pkg/webhook/certs_test.go
 delete mode 100644 pkg/webhook/patch.go
 delete mode 100644 pkg/webhook/resourceusage/enforcer.go
 delete mode 100644 pkg/webhook/resourceusage/handlers.go
 delete mode 100644 pkg/webhook/resourceusage/util.go
 delete mode 100644 pkg/webhook/resourceusage/util_test.go
 delete mode 100644 pkg/webhook/resourceusage/watcher.go
 delete mode 100644 pkg/webhook/webhook.go
 delete mode 100644 pkg/webhook/webhook_test.go
 delete mode 100644 test/e2e/README.md
 delete mode 100644 test/e2e/basic_test.go
 delete mode 100644 test/e2e/framework/cluster_role.go
 delete mode 100644 test/e2e/framework/cluster_role_binding.go
 delete mode 100644 test/e2e/framework/config_map.go
 delete mode 100644 test/e2e/framework/context.go
 delete mode 100644 test/e2e/framework/deployment.go
 delete mode 100644 test/e2e/framework/framework.go
 delete mode 100644 test/e2e/framework/helpers.go
 delete mode 100644 test/e2e/framework/job.go
 delete mode 100644 test/e2e/framework/namespace.go
 delete mode 100644 test/e2e/framework/operator.go
 delete mode 100644 test/e2e/framework/role.go
 delete mode 100644 test/e2e/framework/role_binding.go
 delete mode 100644 test/e2e/framework/service.go
 delete mode 100644 test/e2e/framework/service_account.go
 delete mode 100644 test/e2e/framework/sparkapplication.go
 delete mode 100644 test/e2e/lifecycle_test.go
 delete mode 100644 test/e2e/main_test.go
 create mode 100644 test/e2e/sparkapplication_test.go
 create mode 100644 test/e2e/suit_test.go
 delete mode 100644 test/e2e/volume_mount_test.go
 create mode 100644 version.go

diff --git a/.dockerignore b/.dockerignore
index 22d0d82f8..9b0bebd90 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1 +1,31 @@
-vendor
+.github/
+.idea/
+.vscode/
+bin/
+charts/
+docs/
+config/
+examples/
+hack/
+manifest/
+spark-docker/
+sparkctl/
+test/
+vendor/
+.dockerignore
+.DS_Store
+.gitignore
+.gitlab-ci.yaml
+.golangci.yaml
+.pre-commit-config.yaml
+ADOPTERS.md
+CODE_OF_CONDUCT.md
+codecov.yaml
+CONTRIBUTING.md
+cover.out
+Dockerfile
+LICENSE
+OWNERS
+PROJECT
+README.md
+test.sh
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index 40af1cc3b..ba0ee2a65 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -18,6 +18,11 @@ jobs:
         with:
           fetch-depth: "0"
 
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version-file: go.mod
+
       - name: The API documentation hasn't changed
         run: |
           make build-api-docs
@@ -42,7 +47,7 @@ jobs:
 
       - name: build sparkctl
         run: |
-          make all
+          make build-sparkctl
 
   build-spark-operator:
     runs-on: ubuntu-latest
@@ -57,18 +62,17 @@ jobs:
         with:
           go-version-file: "go.mod"
 
-      - name: Run gofmt check
-        run: make fmt-check
+      - name: Run go fmt check
+        run: make go-fmt
 
-      - name: Run static analysis
-        run: make static-analysis
+      - name: Run go vet
+        run: make go-vet
 
       - name: Run unit tests
         run: make unit-test
 
       - name: Build Spark-Operator Docker Image
-        run: |
-          docker build -t docker.io/kubeflow/spark-operator:latest .
+        run: make docker-build IMAGE_TAG=latest
 
       - name: Check changes in resources used in docker file
         run: |
diff --git a/.gitignore b/.gitignore
index c4f0f0048..022a3ad54 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,11 @@
-.vscode/
+bin/
 vendor/
-spark-operator
-.idea/
-**/*.iml
+cover.out
 sparkctl/sparkctl
-spark-on-k8s-operator
 sparkctl/sparkctl-linux-amd64
 sparkctl/sparkctl-darwin-amd64
+**/*.iml
+
+# Various IDEs
+.idea/
+.vscode/
\ No newline at end of file
diff --git a/.golangci.yaml b/.golangci.yaml
new file mode 100644
index 000000000..01f5a3e58
--- /dev/null
+++ b/.golangci.yaml
@@ -0,0 +1,39 @@
+run:
+  deadline: 5m
+
+linters:
+  enable:
+    - revive
+    - gci
+    - depguard
+    - godot
+    - testifylint
+    - unconvert
+
+issues:
+  exclude-rules:
+    # Disable errcheck linter for test files.
+    - path: _test.go
+      linters:
+        - errcheck
+
+linters-settings:
+  gci:
+    sections:
+      - standard
+      - default
+      - prefix(github.com/kubeflow/spark-operator)
+  depguard:
+    Main:
+      files:
+        - $all
+        - "!$test"
+      listMode: Lax
+      deny:
+        reflect: Please don't use reflect package
+    Test:
+      files:
+        - $test
+      listMode: Lax
+      deny:
+        reflect: Please don't use reflect package
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0385ecba5..7086ba78d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,3 +7,4 @@ repos:
           # Make the tool search for charts only under the `charts` directory
           - --chart-search-root=charts
           - --template-files=README.md.gotmpl
+          - --sort-values-order=file
diff --git a/Dockerfile b/Dockerfile
index c126cca21..61815e195 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -16,33 +16,26 @@
 ARG SPARK_IMAGE=spark:3.5.0
 
-FROM golang:1.22-alpine as builder
+FROM golang:1.22.5 AS builder
 
 WORKDIR /workspace
 
-# Copy the Go Modules manifests
-COPY go.mod go.mod
-COPY go.sum go.sum
-# Cache deps before building and copying source so that we don't need to re-download as much
-# and so that source changes don't invalidate our downloaded layer
-RUN go mod download
+COPY . .
 
-# Copy the go source code
-COPY main.go main.go
-COPY pkg/ pkg/
-
-# Build
 ARG TARGETARCH
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o /usr/bin/spark-operator main.go
+
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on make build-operator
 
 FROM ${SPARK_IMAGE}
+
 USER root
-COPY --from=builder /usr/bin/spark-operator /usr/bin/
-RUN apt-get update --allow-releaseinfo-change \
-    && apt-get update \
+
+RUN apt-get update \
     && apt-get install -y tini \
     && rm -rf /var/lib/apt/lists/*
 
+COPY --from=builder /workspace/bin/spark-operator /usr/bin/spark-operator
+
 COPY entrypoint.sh /usr/bin/
 
 ENTRYPOINT ["/usr/bin/entrypoint.sh"]
diff --git a/Makefile b/Makefile
index b7ae2b378..30ba67c7c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,28 +1,155 @@
-
 .SILENT:
-.PHONY: clean-sparkctl
+
+# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
+ifeq (,$(shell go env GOBIN))
+GOBIN=$(shell go env GOPATH)/bin
+else
+GOBIN=$(shell go env GOBIN)
+endif
+
+# Setting SHELL to bash allows bash commands to be executed by recipes.
+# Options are set to exit when a recipe line exits non-zero or a piped command fails.
+SHELL = /usr/bin/env bash -o pipefail
+.SHELLFLAGS = -ec
+
+# Version information.
+VERSION=$(shell cat VERSION | sed "s/^v//")
+BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%S%:z")
+GIT_COMMIT = $(shell git rev-parse HEAD)
+GIT_TAG = $(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
+GIT_TREE_STATE = $(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
+GIT_SHA = $(shell git rev-parse --short HEAD || echo "HEAD")
+GIT_VERSION = ${VERSION}-${GIT_SHA}
+
+REPO=github.com/kubeflow/spark-operator
 SPARK_OPERATOR_GOPATH=/go/src/github.com/kubeflow/spark-operator
+SPARK_OPERATOR_CHART_PATH=charts/spark-operator-chart
 DEP_VERSION:=`grep DEP_VERSION= Dockerfile | awk -F\" '{print $$2}'`
 BUILDER=`grep "FROM golang:" Dockerfile | awk '{print $$2}'`
 UNAME:=`uname | tr '[:upper:]' '[:lower:]'`
-REPO=github.com/kubeflow/spark-operator
 
-all: clean-sparkctl build-sparkctl install-sparkctl
+# CONTAINER_TOOL defines the container tool to be used for building images.
+# Be aware that the target commands are only tested with Docker which is
+# scaffolded by default. However, you might want to replace it to use other
+# tools. (i.e. podman)
+CONTAINER_TOOL ?= docker
+
+# Image URL to use all building/pushing image targets
+IMAGE_REGISTRY ?= docker.io
+IMAGE_REPOSITORY ?= kubeflow/spark-operator
+IMAGE_TAG ?= $(VERSION)
+IMAGE ?= $(IMAGE_REGISTRY)/$(IMAGE_REPOSITORY):$(IMAGE_TAG)
+
+# Kind cluster
+KIND_CLUSTER_NAME ?= spark-operator
+KIND_CONFIG_FILE ?= charts/spark-operator-chart/ci/kind-config.yaml
+KIND_KUBE_CONFIG ?= $(HOME)/.kube/config
+
+# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
+ENVTEST_K8S_VERSION = 1.29.3
+
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'. The awk command is responsible for reading the
+# entire set of makefiles included in this invocation, looking for lines of the
+# file as xyz: ## something, and then pretty-format the target and help. Then,
+# if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info on the usage of ANSI control characters for terminal formatting:
+# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info on the awk command:
+# http://linuxcommand.org/lc3_adv_awk.php
+
+.PHONY: help
+help: ## Display this help.
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-30s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+.PHONY: version
+version: ## Print version information.
+	@echo "Version: ${VERSION}"
+
+##@ Development
+
+.PHONY: manifests
+manifests: controller-gen ## Generate CustomResourceDefinition, RBAC and WebhookConfiguration manifests.
+	$(CONTROLLER_GEN) crd rbac:roleName=spark-operator-controller webhook paths="./..." output:crd:artifacts:config=config/crd/bases
+
+.PHONY: generate
+generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
+	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
+
+.PHONY: update-crd
+update-crd: manifests ## Update CRD files in the Helm chart.
+	cp config/crd/bases/* charts/spark-operator-chart/crds/
+
+.PHONY: clean
+clean: ## Clean up caches and output.
+	@echo "cleaning up caches and output"
+	go clean -cache -testcache -r -x 2>&1 >/dev/null
+	-rm -rf _output
+
+.PHONY: go-fmt
+go-fmt: ## Run go fmt against code.
+	@echo "Running go fmt..."
+	if [ -n "$(shell go fmt ./...)" ]; then \
+		echo "Go code is not formatted, need to run \"make go-fmt\" and commit the changes."; \
+		false; \
+	else \
+		echo "Go code is formatted."; \
+	fi
+
+.PHONY: go-vet
+go-vet: ## Run go vet against code.
+	@echo "Running go vet..."
+	go vet ./...
+
+.PHONY: lint
+lint: golangci-lint ## Run golangci-lint linter.
+	@echo "Running golangci-lint run..."
+	$(GOLANGCI_LINT) run
+
+.PHONY: lint-fix
+lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes.
+	@echo "Running golangci-lint run --fix..."
+	$(GOLANGCI_LINT) run --fix
+
+.PHONY: unit-test
+unit-test: envtest ## Run unit tests.
+	@echo "Running unit tests..."
+	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" \
+	go test $(shell go list ./... | grep -v /e2e) -coverprofile cover.out
+
+.PHONY: e2e-test
+e2e-test: envtest ## Run the e2e tests against a Kind k8s instance that is spun up.
+	@echo "Running e2e tests..."
+	go test ./test/e2e/ -v -ginkgo.v -timeout 30m
+
+##@ Build
+
+override LDFLAGS += \
+	-X ${REPO}.version=v${VERSION} \
+	-X ${REPO}.buildDate=${BUILD_DATE} \
+	-X ${REPO}.gitCommit=${GIT_COMMIT} \
+	-X ${REPO}.gitTreeState=${GIT_TREE_STATE} \
+	-extldflags "-static"
+
+.PHONY: build-operator
+build-operator: ## Build Spark operator
+	go build -o bin/spark-operator -ldflags '${LDFLAGS}' cmd/main.go
 
-build-sparkctl:
+.PHONY: build-sparkctl
+build-sparkctl: ## Build sparkctl binary.
 	[ ! -f "sparkctl/sparkctl-darwin-amd64" ] || [ ! -f "sparkctl/sparkctl-linux-amd64" ] && \
-f "sparkctl/sparkctl-linux-amd64" ] && \ echo building using $(BUILDER) && \ docker run -w $(SPARK_OPERATOR_GOPATH) \ -v $$(pwd):$(SPARK_OPERATOR_GOPATH) $(BUILDER) sh -c \ "apk add --no-cache bash git && \ cd sparkctl && \ - ./build.sh" || true + bash build.sh" || true -clean-sparkctl: - rm -f sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 - -install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 +.PHONY: install-sparkctl +install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 ## Install sparkctl binary. @if [ "$(UNAME)" = "linux" ]; then \ echo "installing linux binary to /usr/local/bin/sparkctl"; \ sudo cp sparkctl/sparkctl-linux-amd64 /usr/local/bin/sparkctl; \ @@ -35,52 +162,179 @@ install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 echo "$(UNAME) not supported"; \ fi -build-api-docs: - docker build -t temp-api-ref-docs hack/api-docs - docker run -v $$(pwd):/repo/ temp-api-ref-docs \ - sh -c "cd /repo/ && /go/bin/gen-crd-api-reference-docs \ - -config /repo/hack/api-docs/api-docs-config.json \ - -api-dir github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \ - -template-dir /repo/hack/api-docs/api-docs-template \ - -out-file /repo/docs/api-docs.md" +.PHONY: clean-sparkctl +clean-sparkctl: ## Clean sparkctl binary. + rm -f sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 + +.PHONY: build-api-docs +build-api-docs: gen-crd-api-reference-docs ## Build api documentaion. + $(GEN_CRD_API_REFERENCE_DOCS) \ + -config hack/api-docs/config.json \ + -api-dir github.com/kubeflow/spark-operator/api/v1beta2 \ + -template-dir hack/api-docs/template \ + -out-file docs/api-docs.md + +# If you wish to build the operator image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the operator. + $(CONTAINER_TOOL) build -t ${IMAGE} . + +.PHONY: docker-push +docker-push: ## Push docker image with the operator. + $(CONTAINER_TOOL) push ${IMAGE} + +# PLATFORMS defines the target platforms for the operator image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/amd64,linux/arm64 +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the operator for cross-platform support + - $(CONTAINER_TOOL) buildx create --name spark-operator-builder + $(CONTAINER_TOOL) buildx use spark-operator-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMAGE} -f Dockerfile . + - $(CONTAINER_TOOL) buildx rm spark-operator-builder + +##@ Helm -helm-unittest: +.PHONY: detect-crds-drift +detect-crds-drift: + diff -q charts/spark-operator-chart/crds config/crd/bases + +.PHONY: helm-unittest +helm-unittest: helm-unittest-plugin ## Run Helm chart unittests. 
 	helm unittest charts/spark-operator-chart --strict --file "tests/**/*_test.yaml"
 
-helm-lint:
-	docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint
+.PHONY: helm-lint
+helm-lint: ## Run Helm chart lint test.
+	docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint --target-branch master --validate-maintainers=false
 
-helm-docs:
-	docker run --rm --volume "$$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:latest
+.PHONY: helm-docs
+helm-docs: helm-docs-plugin ## Generates markdown documentation for helm charts from requirements and values files.
+	$(HELM_DOCS) --sort-values-order=file
 
-fmt-check: clean
-	@echo "running fmt check"; cd "$(dirname $0)"; \
-	if [ -n "$(go fmt ./...)" ]; \
-	then \
-		echo "Go code is not formatted, please run 'go fmt ./...'." >&2; \
-		exit 1; \
-	else \
-		echo "Go code is formatted"; \
+##@ Deployment
+
+ifndef ignore-not-found
+  ignore-not-found = false
+endif
+
+.PHONY: kind-create-cluster
+kind-create-cluster: kind ## Create a kind cluster for integration tests.
+	if ! $(KIND) get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \
+		kind create cluster --name $(KIND_CLUSTER_NAME) --config $(KIND_CONFIG_FILE) --kubeconfig $(KIND_KUBE_CONFIG); \
 	fi
 
-detect-crds-drift:
-	diff -q charts/spark-operator-chart/crds manifest/crds --exclude=kustomization.yaml
+.PHONY: kind-load-image
+kind-load-image: kind-create-cluster docker-build ## Load the image into the kind cluster.
+	kind load docker-image --name $(KIND_CLUSTER_NAME) $(IMAGE)
 
-clean:
-	@echo "cleaning up caches and output"
-	go clean -cache -testcache -r -x 2>&1 >/dev/null
-	-rm -rf _output
+.PHONY: kind-delete-cluster
+kind-delete-cluster: kind ## Delete the created kind cluster.
+	$(KIND) delete cluster --name $(KIND_CLUSTER_NAME) && \
+	rm -f $(KIND_KUBE_CONFIG)
+
+.PHONY: install-crd
+install-crd: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
+	$(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -
+
+.PHONY: uninstall-crd
+uninstall-crd: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
+
+.PHONY: deploy
+deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMAGE}
+	$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -
+
+.PHONY: undeploy
+undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
+
+##@ Dependencies
+
+## Location to install dependencies to
+LOCALBIN ?= $(shell pwd)/bin
+$(LOCALBIN):
+	mkdir -p $(LOCALBIN)
+
+## Tool Binaries
+KUBECTL ?= kubectl
+KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION)
+CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION)
+KIND ?= $(LOCALBIN)/kind-$(KIND_VERSION)
+ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION)
+GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION)
+GEN_CRD_API_REFERENCE_DOCS ?= $(LOCALBIN)/gen-crd-api-reference-docs-$(GEN_CRD_API_REFERENCE_DOCS_VERSION)
+HELM ?= helm
+HELM_UNITTEST ?= unittest
+HELM_DOCS ?= $(LOCALBIN)/helm-docs-$(HELM_DOCS_VERSION)
 
-unit-test: clean
-	@echo "running unit tests"
-	go test -v ./... -covermode=atomic
+## Tool Versions
+KUSTOMIZE_VERSION ?= v5.4.1
+CONTROLLER_TOOLS_VERSION ?= v0.15.0
+KIND_VERSION ?= v0.23.0
+ENVTEST_VERSION ?= release-0.18
+GOLANGCI_LINT_VERSION ?= v1.57.2
+GEN_CRD_API_REFERENCE_DOCS_VERSION ?= v0.3.0
+HELM_UNITTEST_VERSION ?= 0.5.1
+HELM_DOCS_VERSION ?= v1.14.2
+
+.PHONY: kustomize
+kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
+$(KUSTOMIZE): $(LOCALBIN)
+	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))
+
+.PHONY: controller-gen
+controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
+$(CONTROLLER_GEN): $(LOCALBIN)
+	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))
+
+.PHONY: kind
+kind: $(KIND) ## Download kind locally if necessary.
+$(KIND): $(LOCALBIN)
+	$(call go-install-tool,$(KIND),sigs.k8s.io/kind,$(KIND_VERSION))
+
+.PHONY: envtest
+envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
+$(ENVTEST): $(LOCALBIN)
+	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))
+
+.PHONY: golangci-lint
+golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
+$(GOLANGCI_LINT): $(LOCALBIN)
+	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
+
+.PHONY: gen-crd-api-reference-docs
+gen-crd-api-reference-docs: $(GEN_CRD_API_REFERENCE_DOCS) ## Download gen-crd-api-reference-docs locally if necessary.
+$(GEN_CRD_API_REFERENCE_DOCS): $(LOCALBIN)
+	$(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs,$(GEN_CRD_API_REFERENCE_DOCS_VERSION))
+
+.PHONY: helm-unittest-plugin
+helm-unittest-plugin: ## Download helm unittest plugin locally if necessary.
+	if [ -z "$(shell helm plugin list | grep unittest)" ]; then \
+		echo "Installing helm unittest plugin"; \
+		helm plugin install https://github.com/helm-unittest/helm-unittest.git --version $(HELM_UNITTEST_VERSION); \
+	fi
 
-integration-test: clean
-	@echo "running integration tests"
-	go test -v ./test/e2e/ --kubeconfig "$(HOME)/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:local
+.PHONY: helm-docs-plugin
+helm-docs-plugin: ## Download helm-docs plugin locally if necessary.
+	$(call go-install-tool,$(HELM_DOCS),github.com/norwoodj/helm-docs/cmd/helm-docs,$(HELM_DOCS_VERSION))
 
-static-analysis:
-	@echo "running go vet"
-	# echo "Building using $(BUILDER)"
-	# go vet ./...
-	go vet $(REPO)...
+
+# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
+# $1 - target path with name of binary (ideally with version)
+# $2 - package url which can be installed
+# $3 - specific version of package
+define go-install-tool
+@[ -f $(1) ] || { \
+set -e; \
+package=$(2)@$(3) ;\
+echo "Downloading $${package}" ;\
+GOBIN=$(LOCALBIN) go install $${package} ;\
+mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\
+}
+endef
diff --git a/PROJECT b/PROJECT
new file mode 100644
index 000000000..d71e61624
--- /dev/null
+++ b/PROJECT
@@ -0,0 +1,47 @@
+# Code generated by tool. DO NOT EDIT.
+# This file is used to track the info used to scaffold your project
+# and allow the plugins properly work.
+# More info: https://book.kubebuilder.io/reference/project-config.html
+domain: sparkoperator.k8s.io
+layout:
+- go.kubebuilder.io/v4
+projectName: spark-operator
+repo: github.com/kubeflow/spark-operator
+resources:
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: sparkoperator.k8s.io
+  kind: SparkApplication
+  path: github.com/kubeflow/spark-operator/api/v1beta1
+  version: v1beta1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: sparkoperator.k8s.io
+  kind: ScheduledSparkApplication
+  path: github.com/kubeflow/spark-operator/api/v1beta1
+  version: v1beta1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: sparkoperator.k8s.io
+  kind: SparkApplication
+  path: github.com/kubeflow/spark-operator/api/v1beta2
+  version: v1beta2
+  webhooks:
+    defaulting: true
+    validation: true
+    webhookVersion: v1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: sparkoperator.k8s.io
+  kind: ScheduledSparkApplication
+  path: github.com/kubeflow/spark-operator/api/v1beta2
+  version: v1beta2
+version: "3"
diff --git a/README.md b/README.md
index 48d92caa2..d90756954 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ The Kubernetes Operator for Apache Spark currently supports the following list o
 
 **Current API version:** *`v1beta2`*
 
-**If you are currently using the `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/v1beta1"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f manifest/crds`.**
+**If you are currently using the `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/v1beta1"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f config/crd/bases`.**
 
 ## Prerequisites
 
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/defaults.go b/api/v1beta1/defaults.go
similarity index 100%
rename from pkg/apis/sparkoperator.k8s.io/v1beta1/defaults.go
rename to api/v1beta1/defaults.go
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/doc.go b/api/v1beta1/doc.go
similarity index 100%
rename from pkg/apis/sparkoperator.k8s.io/v1beta1/doc.go
rename to api/v1beta1/doc.go
diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go
new file mode 100644
index 000000000..05b48fe1d
--- /dev/null
+++ b/api/v1beta1/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta1 contains API Schema definitions for the v1beta1 API group
+// +kubebuilder:object:generate=true
+// +groupName=sparkoperator.k8s.io
+package v1beta1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is group version used to register these objects.
+	GroupVersion = schema.GroupVersion{Group: "sparkoperator.k8s.io", Version: "v1beta1"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/register.go b/api/v1beta1/register.go
similarity index 53%
rename from pkg/apis/sparkoperator.k8s.io/v1beta1/register.go
rename to api/v1beta1/register.go
index 0280f01cd..df08671c2 100644
--- a/pkg/apis/sparkoperator.k8s.io/v1beta1/register.go
+++ b/api/v1beta1/register.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,36 +17,18 @@ limitations under the License.
 package v1beta1
 
 import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io"
 )
 
-const Version = "v1beta1"
-
-var (
-	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
-	AddToScheme   = SchemeBuilder.AddToScheme
+const (
+	Group   = "sparkoperator.k8s.io"
+	Version = "v1beta1"
 )
 
 // SchemeGroupVersion is the group version used to register these objects.
-var SchemeGroupVersion = schema.GroupVersion{Group: sparkoperator.GroupName, Version: Version} +var SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} // Resource takes an unqualified resource and returns a Group-qualified GroupResource. func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } - -// addKnownTypes adds the set of types defined in this package to the supplied scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &SparkApplication{}, - &SparkApplicationList{}, - &ScheduledSparkApplication{}, - &ScheduledSparkApplicationList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/api/v1beta1/scheduledsparkapplication_types.go b/api/v1beta1/scheduledsparkapplication_types.go new file mode 100644 index 000000000..fd489bacb --- /dev/null +++ b/api/v1beta1/scheduledsparkapplication_types.go @@ -0,0 +1,104 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// +kubebuilder:skip + +func init() { + SchemeBuilder.Register(&ScheduledSparkApplication{}, &ScheduledSparkApplicationList{}) +} + +// ScheduledSparkApplicationSpec defines the desired state of ScheduledSparkApplication +type ScheduledSparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // Schedule is a cron schedule on which the application should run. + Schedule string `json:"schedule"` + // Template is a template from which SparkApplication instances can be created. + Template SparkApplicationSpec `json:"template"` + // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + // Optional. + // Defaults to false. + Suspend *bool `json:"suspend,omitempty"` + // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + // Optional. + // Defaults to 1. + SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` + // FailedRunHistoryLimit is the number of past failed runs of the application to keep. + // Optional. + // Defaults to 1. 
+ FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` +} + +// ScheduledSparkApplicationStatus defines the observed state of ScheduledSparkApplication +type ScheduledSparkApplicationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // LastRun is the time when the last run of the application started. + LastRun metav1.Time `json:"lastRun,omitempty"` + // NextRun is the time when the next run of the application will start. + NextRun metav1.Time `json:"nextRun,omitempty"` + // LastRunName is the name of the SparkApplication for the most recent run of the application. + LastRunName string `json:"lastRunName,omitempty"` + // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. + PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` + // PastFailedRunNames keeps the names of SparkApplications for past failed runs. + PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` + // ScheduleState is the current scheduling state of the application. + ScheduleState ScheduleState `json:"scheduleState,omitempty"` + // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. + Reason string `json:"reason,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ScheduledSparkApplication is the Schema for the scheduledsparkapplications API +type ScheduledSparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ScheduledSparkApplicationSpec `json:"spec,omitempty"` + Status ScheduledSparkApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduledSparkApplicationList contains a list of ScheduledSparkApplication +type ScheduledSparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScheduledSparkApplication `json:"items"` +} + +type ScheduleState string + +const ( + FailedValidationState ScheduleState = "FailedValidation" + ScheduledState ScheduleState = "Scheduled" +) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go b/api/v1beta1/sparkapplication_types.go similarity index 83% rename from pkg/apis/sparkoperator.k8s.io/v1beta1/types.go rename to api/v1beta1/sparkapplication_types.go index e2d697d73..88f5533b9 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go +++ b/api/v1beta1/sparkapplication_types.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,152 +14,29 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +kubebuilder:skip + package v1beta1 import ( - apiv1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SparkApplicationType describes the type of a Spark application. -type SparkApplicationType string - -// Different types of Spark applications. -const ( - JavaApplicationType SparkApplicationType = "Java" - ScalaApplicationType SparkApplicationType = "Scala" - PythonApplicationType SparkApplicationType = "Python" - RApplicationType SparkApplicationType = "R" -) - -// DeployMode describes the type of deployment of a Spark application. 
-type DeployMode string - -// Different types of deployments. -const ( - ClusterMode DeployMode = "cluster" - ClientMode DeployMode = "client" - InClusterClientMode DeployMode = "in-cluster-client" -) - -// RestartPolicy is the policy of if and in which conditions the controller should restart a terminated application. -// This completely defines actions to be taken on any kind of Failures during an application run. -type RestartPolicy struct { - Type RestartPolicyType `json:"type,omitempty"` - - // FailureRetries are the number of times to retry a failed application before giving up in a particular case. - // This is best effort and actual retry attempts can be >= the value specified due to caching. - // These are required if RestartPolicy is OnFailure. - OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` - OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` - - // Interval to wait between successive retries of a failed application. - OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` - OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` -} - -type RestartPolicyType string - -const ( - Never RestartPolicyType = "Never" - OnFailure RestartPolicyType = "OnFailure" - Always RestartPolicyType = "Always" -) - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:defaulter-gen=true - -type ScheduledSparkApplication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ScheduledSparkApplicationSpec `json:"spec"` - Status ScheduledSparkApplicationStatus `json:"status,omitempty"` -} - -type ConcurrencyPolicy string - -const ( - // ConcurrencyAllow allows SparkApplications to run concurrently. - ConcurrencyAllow ConcurrencyPolicy = "Allow" - // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous - // one hasn't finished yet. - ConcurrencyForbid ConcurrencyPolicy = "Forbid" - // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. - ConcurrencyReplace ConcurrencyPolicy = "Replace" -) - -type ScheduledSparkApplicationSpec struct { - // Schedule is a cron schedule on which the application should run. - Schedule string `json:"schedule"` - // Template is a template from which SparkApplication instances can be created. - Template SparkApplicationSpec `json:"template"` - // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. - // Optional. - // Defaults to false. - Suspend *bool `json:"suspend,omitempty"` - // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` - // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. - // Optional. - // Defaults to 1. - SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` - // FailedRunHistoryLimit is the number of past failed runs of the application to keep. - // Optional. - // Defaults to 1. - FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` -} +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
-type ScheduleState string +// +kubebuilder:skip -const ( - FailedValidationState ScheduleState = "FailedValidation" - ScheduledState ScheduleState = "Scheduled" -) - -type ScheduledSparkApplicationStatus struct { - // LastRun is the time when the last run of the application started. - LastRun metav1.Time `json:"lastRun,omitempty"` - // NextRun is the time when the next run of the application will start. - NextRun metav1.Time `json:"nextRun,omitempty"` - // LastRunName is the name of the SparkApplication for the most recent run of the application. - LastRunName string `json:"lastRunName,omitempty"` - // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. - PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` - // PastFailedRunNames keeps the names of SparkApplications for past failed runs. - PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` - // ScheduleState is the current scheduling state of the application. - ScheduleState ScheduleState `json:"scheduleState,omitempty"` - // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. - Reason string `json:"reason,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ScheduledSparkApplicationList carries a list of ScheduledSparkApplication objects. -type ScheduledSparkApplicationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ScheduledSparkApplication `json:"items,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:defaulter-gen=true - -// SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager. -type SparkApplication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec SparkApplicationSpec `json:"spec"` - Status SparkApplicationStatus `json:"status,omitempty"` +func init() { + SchemeBuilder.Register(&SparkApplication{}, &SparkApplicationList{}) } -// SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager. -// It carries every pieces of information a spark-submit command takes and recognizes. +// SparkApplicationSpec defines the desired state of SparkApplication type SparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + // Type tells the type of the Spark application. Type SparkApplicationType `json:"type"` // SparkVersion is the version of Spark the application uses. @@ -208,7 +85,7 @@ type SparkApplicationSpec struct { HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"` // Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors. // Optional. - Volumes []apiv1.Volume `json:"volumes,omitempty"` + Volumes []corev1.Volume `json:"volumes,omitempty"` // Driver is the driver specification. Driver DriverSpec `json:"driver"` // Executor is the executor specification. 
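Aside from the apiv1 -> corev1 import alias rename shown above, the relocated v1beta1 types keep their field names and JSON tags, so existing client code only needs the new api/v1beta1 import path. A minimal sketch of constructing an application against the relocated package (the object name and field values are made-up examples, and a Mode field of type DeployMode is assumed on the spec, as the constants in this file suggest; none of this is part of the patch itself):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1"
)

// newSparkPi builds a minimal v1beta1 SparkApplication using the constants
// defined in this file. Driver and Executor are left zero-valued for brevity.
func newSparkPi() *v1beta1.SparkApplication {
	return &v1beta1.SparkApplication{
		ObjectMeta: metav1.ObjectMeta{Name: "spark-pi", Namespace: "default"},
		Spec: v1beta1.SparkApplicationSpec{
			Type: v1beta1.ScalaApplicationType,
			Mode: v1beta1.ClusterMode,
		},
	}
}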
@@ -246,6 +123,111 @@ type SparkApplicationSpec struct { BatchScheduler *string `json:"batchScheduler,omitempty"` } +// SparkApplicationStatus defines the observed state of SparkApplication +type SparkApplicationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // SparkApplicationID is set by the Spark distribution (via the spark.app.id config) on the driver and executor pods + SparkApplicationID string `json:"sparkApplicationId,omitempty"` + // SubmissionID is a unique ID of the current submission of the application. + SubmissionID string `json:"submissionID,omitempty"` + // LastSubmissionAttemptTime is the time for the last application submission attempt. + LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"` + // TerminationTime is the time when the application runs to completion, if it does. + TerminationTime metav1.Time `json:"terminationTime,omitempty"` + // DriverInfo has information about the driver. + DriverInfo DriverInfo `json:"driverInfo"` + // AppState tells the overall application state. + AppState ApplicationState `json:"applicationState,omitempty"` + // ExecutorState records the state of executors by executor Pod names. + ExecutorState map[string]ExecutorState `json:"executorState,omitempty"` + // ExecutionAttempts is the total number of attempts to run a submitted application to completion. + // Incremented upon each attempted run of the application and reset upon invalidation. + ExecutionAttempts int32 `json:"executionAttempts,omitempty"` + // SubmissionAttempts is the total number of attempts to submit an application to run. + // Incremented upon each attempted submission of the application and reset upon invalidation and rerun. + SubmissionAttempts int32 `json:"submissionAttempts,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SparkApplication is the Schema for the sparkapplications API +type SparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SparkApplicationSpec `json:"spec,omitempty"` + Status SparkApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SparkApplicationList contains a list of SparkApplication +type SparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SparkApplication `json:"items"` +} + +// SparkApplicationType describes the type of a Spark application. +type SparkApplicationType string + +// Different types of Spark applications. +const ( + JavaApplicationType SparkApplicationType = "Java" + ScalaApplicationType SparkApplicationType = "Scala" + PythonApplicationType SparkApplicationType = "Python" + RApplicationType SparkApplicationType = "R" +) + +// DeployMode describes the type of deployment of a Spark application. +type DeployMode string + +// Different types of deployments. +const ( + ClusterMode DeployMode = "cluster" + ClientMode DeployMode = "client" + InClusterClientMode DeployMode = "in-cluster-client" +) + +// RestartPolicy defines whether, and under which conditions, the controller should restart a terminated application. +// This completely defines the actions to be taken on any kind of failure during an application run. 
+type RestartPolicy struct { + Type RestartPolicyType `json:"type,omitempty"` + + // FailureRetries are the number of times to retry a failed application before giving up in a particular case. + // This is best effort and actual retry attempts can be >= the value specified due to caching. + // These are required if RestartPolicy is OnFailure. + OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` + OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` + + // Interval to wait between successive retries of a failed application. + OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` + OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` +} + +type RestartPolicyType string + +const ( + Never RestartPolicyType = "Never" + OnFailure RestartPolicyType = "OnFailure" + Always RestartPolicyType = "Always" +) + +type ConcurrencyPolicy string + +const ( + // ConcurrencyAllow allows SparkApplications to run concurrently. + ConcurrencyAllow ConcurrencyPolicy = "Allow" + // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous + // one hasn't finished yet. + ConcurrencyForbid ConcurrencyPolicy = "Forbid" + // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. + ConcurrencyReplace ConcurrencyPolicy = "Replace" +) + // ApplicationStateType represents the type of the current state of an application. type ApplicationStateType string @@ -282,39 +264,6 @@ const ( ExecutorUnknownState ExecutorState = "UNKNOWN" ) -// SparkApplicationStatus describes the current status of a Spark application. -type SparkApplicationStatus struct { - // SparkApplicationID is set by the spark-distribution(via spark.app.id config) on the driver and executor pods - SparkApplicationID string `json:"sparkApplicationId,omitempty"` - // SubmissionID is a unique ID of the current submission of the application. - SubmissionID string `json:"submissionID,omitempty"` - // LastSubmissionAttemptTime is the time for the last application submission attempt. - LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"` - // CompletionTime is the time when the application runs to completion if it does. - TerminationTime metav1.Time `json:"terminationTime,omitempty"` - // DriverInfo has information about the driver. - DriverInfo DriverInfo `json:"driverInfo"` - // AppState tells the overall application state. - AppState ApplicationState `json:"applicationState,omitempty"` - // ExecutorState records the state of executors by executor Pod names. - ExecutorState map[string]ExecutorState `json:"executorState,omitempty"` - // ExecutionAttempts is the total number of attempts to run a submitted application to completion. - // Incremented upon each attempted run of the application and reset upon invalidation. - ExecutionAttempts int32 `json:"executionAttempts,omitempty"` - // SubmissionAttempts is the total number of attempts to submit an application to run. - // Incremented upon each attempted submission of the application and reset upon invalidation and rerun. - SubmissionAttempts int32 `json:"submissionAttempts,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SparkApplicationList carries a list of SparkApplication objects. 
-type SparkApplicationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []SparkApplication `json:"items,omitempty"` -} - // Dependencies specifies all possible types of dependencies of a Spark application. type Dependencies struct { // Jars is a list of JAR files the Spark application depends on. @@ -379,22 +328,22 @@ type SparkPodSpec struct { Annotations map[string]string `json:"annotations,omitempty"` // VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem. // Optional. - VolumeMounts []apiv1.VolumeMount `json:"volumeMounts,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` // Affinity specifies the affinity/anti-affinity settings for the pod. // Optional. - Affinity *apiv1.Affinity `json:"affinity,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` // Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod. // Optional. - Tolerations []apiv1.Toleration `json:"tolerations,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // SecurityContext specifies the PodSecurityContext to apply. // Optional. - SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty"` + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` // SchedulerName specifies the scheduler that will be used for scheduling // Optional. SchedulerName *string `json:"schedulerName,omitempty"` // Sidecars is a list of sidecar containers that run along side the main Spark container. // Optional. - Sidecars []apiv1.Container `json:"sidecars,omitempty"` + Sidecars []corev1.Container `json:"sidecars,omitempty"` // HostNetwork indicates whether to request host networking for the pod or not. // Optional. HostNetwork *bool `json:"hostNetwork,omitempty"` @@ -404,12 +353,12 @@ type SparkPodSpec struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` // DnsConfig dns settings for the pod, following the Kubernetes specifications. // Optional. - DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty"` + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` } // DriverSpec is specification of the driver. type DriverSpec struct { - SparkPodSpec + SparkPodSpec `json:",inline"` // PodName is the name of the driver pod that the user creates. This is used for the // in-cluster client mode in which the user creates a client pod where the driver of // the user application runs. It's an error to set this field if Mode is not @@ -426,7 +375,7 @@ type DriverSpec struct { // ExecutorSpec is specification of the executor. type ExecutorSpec struct { - SparkPodSpec + SparkPodSpec `json:",inline"` // Instances is the number of executor instances. // Optional. Instances *int32 `json:"instances,omitempty"` diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go similarity index 98% rename from pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go rename to api/v1beta1/zz_generated.deepcopy.go index 7d7c09d03..719ff9e09 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1,10 +1,7 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by k8s code-generator DO NOT EDIT. /* -Copyright 2018 Google LLC +Copyright 2024 The Kubeflow authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,19 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ApplicationState) DeepCopyInto(out *ApplicationState) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState. @@ -82,7 +78,6 @@ func (in *Dependencies) DeepCopyInto(out *Dependencies) { *out = new(int32) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies. @@ -98,7 +93,6 @@ func (in *Dependencies) DeepCopy() *Dependencies { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DriverInfo) DeepCopyInto(out *DriverInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo. @@ -130,7 +124,6 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec. @@ -162,7 +155,6 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec. @@ -178,7 +170,6 @@ func (in *ExecutorSpec) DeepCopy() *ExecutorSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GPUSpec) DeepCopyInto(out *GPUSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec. @@ -204,7 +195,6 @@ func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { *out = new(PrometheusSpec) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. @@ -220,7 +210,6 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameKey) DeepCopyInto(out *NameKey) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey. @@ -236,7 +225,6 @@ func (in *NameKey) DeepCopy() *NameKey { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamePath) DeepCopyInto(out *NamePath) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath. @@ -267,7 +255,6 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. 
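The controller-gen output above is behaviorally identical to the old deepcopy-gen output; dropping the bare return statements is purely cosmetic. As a reminder of why these methods exist, here is a hedged sketch of the usual controller pattern (bumpAttempts is an illustrative helper, not code from this patch): objects read from a shared informer cache must not be mutated in place, so a controller deep-copies first.

package example

import (
	v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1"
)

// bumpAttempts deep-copies the cached object before touching status,
// leaving the shared cache copy untouched.
func bumpAttempts(app *v1beta1.SparkApplication) *v1beta1.SparkApplication {
	copied := app.DeepCopy() // generated DeepCopy from the file above
	copied.Status.ExecutionAttempts++
	return copied
}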
@@ -303,7 +290,6 @@ func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) { *out = new(int64) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy. @@ -323,7 +309,6 @@ func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication. @@ -356,7 +341,6 @@ func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplica (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList. @@ -396,7 +380,6 @@ func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplica *out = new(int32) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec. @@ -424,7 +407,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkAppli *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus. @@ -440,7 +422,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplication // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretInfo) DeepCopyInto(out *SecretInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo. @@ -460,7 +441,6 @@ func (in *SparkApplication) DeepCopyInto(out *SparkApplication) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication. @@ -493,7 +473,6 @@ func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList. @@ -624,7 +603,6 @@ func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec. @@ -651,7 +629,6 @@ func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) { (*out)[key] = val } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus. @@ -788,7 +765,6 @@ func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) { *out = new(v1.PodDNSConfig) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec. 
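With groupversion_info.go in place and the init() registrations in the *_types.go files above, callers can register all four v1beta1 kinds through a single AddToScheme call; the removed addKnownTypes helper is no longer needed (controller-runtime's scheme.Builder also registers the metav1 types for the group-version). A minimal sketch using only what the hunks above define:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"

	v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1"
)

// newScheme builds a runtime.Scheme that knows about SparkApplication(List)
// and ScheduledSparkApplication(List) under sparkoperator.k8s.io/v1beta1.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := v1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}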
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go b/api/v1beta2/defaults.go similarity index 92% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go rename to api/v1beta2/defaults.go index e46f4012d..aaf2ff25f 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go +++ b/api/v1beta2/defaults.go @@ -24,15 +24,19 @@ func SetSparkApplicationDefaults(app *SparkApplication) { return } + if app.Spec.Type == "" { + app.Spec.Type = SparkApplicationTypeScala + } + if app.Spec.Mode == "" { - app.Spec.Mode = ClusterMode + app.Spec.Mode = DeployModeCluster } if app.Spec.RestartPolicy.Type == "" { - app.Spec.RestartPolicy.Type = Never + app.Spec.RestartPolicy.Type = RestartPolicyNever } - if app.Spec.RestartPolicy.Type != Never { + if app.Spec.RestartPolicy.Type != RestartPolicyNever { // Default to 5 sec if the RestartPolicy is OnFailure or Always and these values aren't specified. if app.Spec.RestartPolicy.OnFailureRetryInterval == nil { app.Spec.RestartPolicy.OnFailureRetryInterval = new(int64) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go b/api/v1beta2/defaults_test.go similarity index 92% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go rename to api/v1beta2/defaults_test.go index 624374ee1..a516e41e3 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go +++ b/api/v1beta2/defaults_test.go @@ -36,11 +36,11 @@ func TestSetSparkApplicationDefaultsEmptyModeShouldDefaultToClusterMode(t *testi SetSparkApplicationDefaults(app) - assert.Equal(t, ClusterMode, app.Spec.Mode) + assert.Equal(t, DeployModeCluster, app.Spec.Mode) } func TestSetSparkApplicationDefaultsModeShouldNotChangeIfSet(t *testing.T) { - expectedMode := ClientMode + expectedMode := DeployModeClient app := &SparkApplication{ Spec: SparkApplicationSpec{ Mode: expectedMode, @@ -59,21 +59,21 @@ func TestSetSparkApplicationDefaultsEmptyRestartPolicyShouldDefaultToNever(t *te SetSparkApplicationDefaults(app) - assert.Equal(t, Never, app.Spec.RestartPolicy.Type) + assert.Equal(t, RestartPolicyNever, app.Spec.RestartPolicy.Type) } func TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValues(t *testing.T) { app := &SparkApplication{ Spec: SparkApplicationSpec{ RestartPolicy: RestartPolicy{ - Type: OnFailure, + Type: RestartPolicyOnFailure, }, }, } SetSparkApplicationDefaults(app) - assert.Equal(t, OnFailure, app.Spec.RestartPolicy.Type) + assert.Equal(t, RestartPolicyOnFailure, app.Spec.RestartPolicy.Type) assert.NotNil(t, app.Spec.RestartPolicy.OnFailureRetryInterval) assert.Equal(t, int64(5), *app.Spec.RestartPolicy.OnFailureRetryInterval) assert.NotNil(t, app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval) @@ -85,7 +85,7 @@ func TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValueF app := &SparkApplication{ Spec: SparkApplicationSpec{ RestartPolicy: RestartPolicy{ - Type: OnFailure, + Type: RestartPolicyOnFailure, OnSubmissionFailureRetryInterval: &expectedOnSubmissionFailureRetryInterval, }, }, @@ -93,7 +93,7 @@ func TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValueF SetSparkApplicationDefaults(app) - assert.Equal(t, OnFailure, app.Spec.RestartPolicy.Type) + assert.Equal(t, RestartPolicyOnFailure, app.Spec.RestartPolicy.Type) assert.NotNil(t, app.Spec.RestartPolicy.OnFailureRetryInterval) assert.Equal(t, int64(5), *app.Spec.RestartPolicy.OnFailureRetryInterval) assert.NotNil(t, app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval) @@ -105,7 +105,7 @@ func 
TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValueF app := &SparkApplication{ Spec: SparkApplicationSpec{ RestartPolicy: RestartPolicy{ - Type: OnFailure, + Type: RestartPolicyOnFailure, OnFailureRetryInterval: &expectedOnFailureRetryInterval, }, }, @@ -113,7 +113,7 @@ func TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValueF SetSparkApplicationDefaults(app) - assert.Equal(t, OnFailure, app.Spec.RestartPolicy.Type) + assert.Equal(t, RestartPolicyOnFailure, app.Spec.RestartPolicy.Type) assert.NotNil(t, app.Spec.RestartPolicy.OnFailureRetryInterval) assert.Equal(t, expectedOnFailureRetryInterval, *app.Spec.RestartPolicy.OnFailureRetryInterval) assert.NotNil(t, app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/doc.go b/api/v1beta2/doc.go similarity index 100% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/doc.go rename to api/v1beta2/doc.go diff --git a/api/v1beta2/groupversion_info.go b/api/v1beta2/groupversion_info.go new file mode 100644 index 000000000..0f8277c70 --- /dev/null +++ b/api/v1beta2/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta2 contains API Schema definitions for the v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=sparkoperator.k8s.io +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "sparkoperator.k8s.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/hack/custom-boilerplate.go.txt b/api/v1beta2/pod_webhook.go similarity index 87% rename from hack/custom-boilerplate.go.txt rename to api/v1beta2/pod_webhook.go index 3f8b10a22..5cdbbd0e3 100644 --- a/hack/custom-boilerplate.go.txt +++ b/api/v1beta2/pod_webhook.go @@ -1,7 +1,5 @@ -// Code generated by k8s code-generator DO NOT EDIT. - /* -Copyright 2018 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,3 +13,5 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ + +package v1beta2 diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go b/api/v1beta2/register.go similarity index 53% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/register.go rename to api/v1beta2/register.go index 20d087b7f..f6eea9ab8 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go +++ b/api/v1beta2/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,36 +17,18 @@ limitations under the License. package v1beta2 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io" ) -const Version = "v1beta2" - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme +const ( + Group = "sparkoperator.k8s.io" + Version = "v1beta2" ) // SchemeGroupVersion is the group version used to register these objects. -var SchemeGroupVersion = schema.GroupVersion{Group: sparkoperator.GroupName, Version: Version} +var SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} // Resource takes an unqualified resource and returns a Group-qualified GroupResource. func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } - -// addKnownTypes adds the set of types defined in this package to the supplied scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &SparkApplication{}, - &SparkApplicationList{}, - &ScheduledSparkApplication{}, - &ScheduledSparkApplicationList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/api/v1beta2/scheduledsparkapplication_types.go b/api/v1beta2/scheduledsparkapplication_types.go new file mode 100644 index 000000000..486a890a1 --- /dev/null +++ b/api/v1beta2/scheduledsparkapplication_types.go @@ -0,0 +1,125 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +func init() { + SchemeBuilder.Register(&ScheduledSparkApplication{}, &ScheduledSparkApplicationList{}) +} + +// ScheduledSparkApplicationSpec defines the desired state of ScheduledSparkApplication. +type ScheduledSparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // Schedule is a cron schedule on which the application should run. + Schedule string `json:"schedule"` + // Template is a template from which SparkApplication instances can be created. 
+ Template SparkApplicationSpec `json:"template"` + // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + // +optional + // Defaults to false. + Suspend *bool `json:"suspend,omitempty"` + // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + // +optional + // Defaults to 1. + SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` + // FailedRunHistoryLimit is the number of past failed runs of the application to keep. + // +optional + // Defaults to 1. + FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` +} + +// ScheduledSparkApplicationStatus defines the observed state of ScheduledSparkApplication. +type ScheduledSparkApplicationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // LastRun is the time when the last run of the application started. + // +nullable + LastRun metav1.Time `json:"lastRun,omitempty"` + // NextRun is the time when the next run of the application will start. + // +nullable + NextRun metav1.Time `json:"nextRun,omitempty"` + // LastRunName is the name of the SparkApplication for the most recent run of the application. + LastRunName string `json:"lastRunName,omitempty"` + // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. + PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` + // PastFailedRunNames keeps the names of SparkApplications for past failed runs. + PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` + // ScheduleState is the current scheduling state of the application. + ScheduleState ScheduleState `json:"scheduleState,omitempty"` + // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. + Reason string `json:"reason,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298" +// +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=.spec.schedule,name=Schedule,type=string +// +kubebuilder:printcolumn:JSONPath=.spec.suspend,name=Suspend,type=string +// +kubebuilder:printcolumn:JSONPath=.status.lastRun,name=Last Run,type=date +// +kubebuilder:printcolumn:JSONPath=.status.lastRunName,name=Last Run Name,type=string +// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date + +// ScheduledSparkApplication is the Schema for the scheduledsparkapplications API. +type ScheduledSparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ScheduledSparkApplicationSpec `json:"spec,omitempty"` + Status ScheduledSparkApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduledSparkApplicationList contains a list of ScheduledSparkApplication. 
+type ScheduledSparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScheduledSparkApplication `json:"items"` +} + +type ConcurrencyPolicy string + +const ( + // ConcurrencyAllow allows SparkApplications to run concurrently. + ConcurrencyAllow ConcurrencyPolicy = "Allow" + // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous + // one hasn't finished yet. + ConcurrencyForbid ConcurrencyPolicy = "Forbid" + // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. + ConcurrencyReplace ConcurrencyPolicy = "Replace" +) + +type ScheduleState string + +const ( + ScheduleStateNew ScheduleState = "" + ScheduleStateValidating ScheduleState = "Validating" + ScheduleStateScheduled ScheduleState = "Scheduled" + ScheduleStateFailedValidation ScheduleState = "FailedValidation" +) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/api/v1beta2/sparkapplication_types.go similarity index 75% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/types.go rename to api/v1beta2/sparkapplication_types.go index ca009e739..4a9e13efb 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go +++ b/api/v1beta2/sparkapplication_types.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,168 +17,24 @@ limitations under the License. package v1beta2 import ( - apiv1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SparkApplicationType describes the type of a Spark application. -type SparkApplicationType string - -// Different types of Spark applications. -const ( - JavaApplicationType SparkApplicationType = "Java" - ScalaApplicationType SparkApplicationType = "Scala" - PythonApplicationType SparkApplicationType = "Python" - RApplicationType SparkApplicationType = "R" -) - -// DeployMode describes the type of deployment of a Spark application. -type DeployMode string - -// Different types of deployments. -const ( - ClusterMode DeployMode = "cluster" - ClientMode DeployMode = "client" - InClusterClientMode DeployMode = "in-cluster-client" -) - -// RestartPolicy is the policy of if and in which conditions the controller should restart a terminated application. -// This completely defines actions to be taken on any kind of Failures during an application run. -type RestartPolicy struct { - // Type specifies the RestartPolicyType. - // +kubebuilder:validation:Enum={Never,Always,OnFailure} - Type RestartPolicyType `json:"type,omitempty"` - - // OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. - // This is best effort and actual retry attempts can be >= the value specified due to caching. - // These are required if RestartPolicy is OnFailure. - // +kubebuilder:validation:Minimum=0 - // +optional - OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` - - // OnFailureRetries the number of times to retry running an application before giving up. - // +kubebuilder:validation:Minimum=0 - // +optional - OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` - - // OnSubmissionFailureRetryInterval is the interval in seconds between retries on failed submissions. 
- // +kubebuilder:validation:Minimum=1 - // +optional - OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` - - // OnFailureRetryInterval is the interval in seconds between retries on failed runs. - // +kubebuilder:validation:Minimum=1 - // +optional - OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` -} - -type RestartPolicyType string - -const ( - Never RestartPolicyType = "Never" - OnFailure RestartPolicyType = "OnFailure" - Always RestartPolicyType = "Always" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:defaulter-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication - -type ScheduledSparkApplication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ScheduledSparkApplicationSpec `json:"spec"` - Status ScheduledSparkApplicationStatus `json:"status,omitempty"` -} +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. -type ConcurrencyPolicy string - -const ( - // ConcurrencyAllow allows SparkApplications to run concurrently. - ConcurrencyAllow ConcurrencyPolicy = "Allow" - // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous - // one hasn't finished yet. - ConcurrencyForbid ConcurrencyPolicy = "Forbid" - // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. - ConcurrencyReplace ConcurrencyPolicy = "Replace" -) - -type ScheduledSparkApplicationSpec struct { - // Schedule is a cron schedule on which the application should run. - Schedule string `json:"schedule"` - // Template is a template from which SparkApplication instances can be created. - Template SparkApplicationSpec `json:"template"` - // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. - // +optional - // Defaults to false. - Suspend *bool `json:"suspend,omitempty"` - // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` - // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. - // +optional - // Defaults to 1. - SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` - // FailedRunHistoryLimit is the number of past failed runs of the application to keep. - // +optional - // Defaults to 1. - FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` -} - -type ScheduleState string - -const ( - FailedValidationState ScheduleState = "FailedValidation" - ScheduledState ScheduleState = "Scheduled" -) - -type ScheduledSparkApplicationStatus struct { - // LastRun is the time when the last run of the application started. - // +nullable - LastRun metav1.Time `json:"lastRun,omitempty"` - // NextRun is the time when the next run of the application will start. - // +nullable - NextRun metav1.Time `json:"nextRun,omitempty"` - // LastRunName is the name of the SparkApplication for the most recent run of the application. - LastRunName string `json:"lastRunName,omitempty"` - // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. 
- PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` - // PastFailedRunNames keeps the names of SparkApplications for past failed runs. - PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` - // ScheduleState is the current scheduling state of the application. - ScheduleState ScheduleState `json:"scheduleState,omitempty"` - // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. - Reason string `json:"reason,omitempty"` +func init() { + SchemeBuilder.Register(&SparkApplication{}, &SparkApplicationList{}) } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ScheduledSparkApplicationList carries a list of ScheduledSparkApplication objects. -type ScheduledSparkApplicationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ScheduledSparkApplication `json:"items,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:defaulter-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Namespaced,shortName=sparkapp,singular=sparkapplication - -// SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager. -type SparkApplication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec SparkApplicationSpec `json:"spec"` - Status SparkApplicationStatus `json:"status,omitempty"` -} - -// SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager. +// SparkApplicationSpec defines the desired state of SparkApplication // It carries every piece of information a spark-submit command takes and recognizes. type SparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + // Type tells the type of the Spark application. // +kubebuilder:validation:Enum={Java,Python,Scala,R} Type SparkApplicationType `json:"type"` @@ -230,7 +86,7 @@ type SparkApplicationSpec struct { HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"` // Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors. // +optional - Volumes []apiv1.Volume `json:"volumes,omitempty"` + Volumes []corev1.Volume `json:"volumes,omitempty"` // Driver is the driver specification. Driver DriverSpec `json:"driver"` // Executor is the executor specification. @@ -289,6 +145,122 @@ type SparkApplicationSpec struct { DynamicAllocation *DynamicAllocation `json:"dynamicAllocation,omitempty"` } +// SparkApplicationStatus defines the observed state of SparkApplication +type SparkApplicationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // SparkApplicationID is set by the Spark distribution (via the spark.app.id config) on the driver and executor pods + SparkApplicationID string `json:"sparkApplicationId,omitempty"` + // SubmissionID is a unique ID of the current submission of the application. + SubmissionID string `json:"submissionID,omitempty"` + // LastSubmissionAttemptTime is the time for the last application submission attempt. + // +nullable + LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"` + // TerminationTime is the time when the application runs to completion, if it does. 
+ // +nullable + TerminationTime metav1.Time `json:"terminationTime,omitempty"` + // DriverInfo has information about the driver. + DriverInfo DriverInfo `json:"driverInfo"` + // AppState tells the overall application state. + AppState ApplicationState `json:"applicationState,omitempty"` + // ExecutorState records the state of executors by executor Pod names. + ExecutorState map[string]ExecutorState `json:"executorState,omitempty"` + // ExecutionAttempts is the total number of attempts to run a submitted application to completion. + // Incremented upon each attempted run of the application and reset upon invalidation. + ExecutionAttempts int32 `json:"executionAttempts,omitempty"` + // SubmissionAttempts is the total number of attempts to submit an application to run. + // Incremented upon each attempted submission of the application and reset upon invalidation and rerun. + SubmissionAttempts int32 `json:"submissionAttempts,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298" +// +kubebuilder:resource:scope=Namespaced,shortName=sparkapp,singular=sparkapplication +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=.status.applicationState.state,name=Status,type=string +// +kubebuilder:printcolumn:JSONPath=.status.executionAttempts,name=Attempts,type=string +// +kubebuilder:printcolumn:JSONPath=.status.lastSubmissionAttemptTime,name=Start,type=string +// +kubebuilder:printcolumn:JSONPath=.status.terminationTime,name=Finish,type=string +// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date + +// SparkApplication is the Schema for the sparkapplications API +type SparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SparkApplicationSpec `json:"spec,omitempty"` + Status SparkApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SparkApplicationList contains a list of SparkApplication +type SparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SparkApplication `json:"items"` +} + +// SparkApplicationType describes the type of a Spark application. +type SparkApplicationType string + +// Different types of Spark applications. +const ( + SparkApplicationTypeJava SparkApplicationType = "Java" + SparkApplicationTypeScala SparkApplicationType = "Scala" + SparkApplicationTypePython SparkApplicationType = "Python" + SparkApplicationTypeR SparkApplicationType = "R" +) + +// DeployMode describes the type of deployment of a Spark application. +type DeployMode string + +// Different types of deployments. +const ( + DeployModeCluster DeployMode = "cluster" + DeployModeClient DeployMode = "client" + DeployModeInClusterClient DeployMode = "in-cluster-client" +) + +// RestartPolicy defines whether, and under which conditions, the controller should restart a terminated application. +// This completely defines the actions to be taken on any kind of failure during an application run. +type RestartPolicy struct { + // Type specifies the RestartPolicyType. + // +kubebuilder:validation:Enum={Never,Always,OnFailure} + Type RestartPolicyType `json:"type,omitempty"` + + // OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. + // This is best effort and actual retry attempts can be >= the value specified due to caching. 
+ // These are required if RestartPolicy is OnFailure. + // +kubebuilder:validation:Minimum=0 + // +optional + OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` + + // OnFailureRetries the number of times to retry running an application before giving up. + // +kubebuilder:validation:Minimum=0 + // +optional + OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` + + // OnSubmissionFailureRetryInterval is the interval in seconds between retries on failed submissions. + // +kubebuilder:validation:Minimum=1 + // +optional + OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` + + // OnFailureRetryInterval is the interval in seconds between retries on failed runs. + // +kubebuilder:validation:Minimum=1 + // +optional + OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` +} + +type RestartPolicyType string + +const ( + RestartPolicyNever RestartPolicyType = "Never" + RestartPolicyOnFailure RestartPolicyType = "OnFailure" + RestartPolicyAlways RestartPolicyType = "Always" +) + // BatchSchedulerConfiguration used to configure how to batch scheduling Spark Application type BatchSchedulerConfiguration struct { // Queue stands for the resource queue which the application belongs to, it's being used in Volcano batch scheduler. @@ -300,7 +272,7 @@ type BatchSchedulerConfiguration struct { // Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. // If specified, volcano scheduler will consider it as the resources requested. // +optional - Resources apiv1.ResourceList `json:"resources,omitempty"` + Resources corev1.ResourceList `json:"resources,omitempty"` } // SparkUIConfiguration is for driver UI specific configuration parameters. @@ -316,11 +288,11 @@ type SparkUIConfiguration struct { ServicePortName *string `json:"servicePortName"` // ServiceType allows configuring the type of the service. Defaults to ClusterIP. // +optional - ServiceType *apiv1.ServiceType `json:"serviceType"` + ServiceType *corev1.ServiceType `json:"serviceType"` // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object. // +optional ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` - // ServiceLables is a map of key,value pairs of labels that might be added to the service object. + // ServiceLabels is a map of key,value pairs of labels that might be added to the service object. // +optional ServiceLabels map[string]string `json:"serviceLabels,omitempty"` // IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object. i.e. specify nginx as ingress.class @@ -340,11 +312,11 @@ type DriverIngressConfiguration struct { ServicePortName *string `json:"servicePortName"` // ServiceType allows configuring the type of the service. Defaults to ClusterIP. // +optional - ServiceType *apiv1.ServiceType `json:"serviceType"` + ServiceType *corev1.ServiceType `json:"serviceType"` // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object. // +optional ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` - // ServiceLables is a map of key,value pairs of labels that might be added to the service object. + // ServiceLabels is a map of key,value pairs of labels that might be added to the service object. 
// +optional ServiceLabels map[string]string `json:"serviceLabels,omitempty"` // IngressURLFormat is the URL for the ingress. @@ -362,17 +334,17 @@ type ApplicationStateType string // Different states an application may have. const ( - NewState ApplicationStateType = "" - SubmittedState ApplicationStateType = "SUBMITTED" - RunningState ApplicationStateType = "RUNNING" - CompletedState ApplicationStateType = "COMPLETED" - FailedState ApplicationStateType = "FAILED" - FailedSubmissionState ApplicationStateType = "SUBMISSION_FAILED" - PendingRerunState ApplicationStateType = "PENDING_RERUN" - InvalidatingState ApplicationStateType = "INVALIDATING" - SucceedingState ApplicationStateType = "SUCCEEDING" - FailingState ApplicationStateType = "FAILING" - UnknownState ApplicationStateType = "UNKNOWN" + ApplicationStateNew ApplicationStateType = "" + ApplicationStateSubmitted ApplicationStateType = "SUBMITTED" + ApplicationStateRunning ApplicationStateType = "RUNNING" + ApplicationStateCompleted ApplicationStateType = "COMPLETED" + ApplicationStateFailed ApplicationStateType = "FAILED" + ApplicationStateFailedSubmission ApplicationStateType = "SUBMISSION_FAILED" + ApplicationStatePendingRerun ApplicationStateType = "PENDING_RERUN" + ApplicationStateInvalidating ApplicationStateType = "INVALIDATING" + ApplicationStateSucceeding ApplicationStateType = "SUCCEEDING" + ApplicationStateFailing ApplicationStateType = "FAILING" + ApplicationStateUnknown ApplicationStateType = "UNKNOWN" ) // ApplicationState tells the current state of the application and an error message in case of failures. @@ -386,11 +358,11 @@ type DriverState string // Different states a spark driver may have. const ( - DriverPendingState DriverState = "PENDING" - DriverRunningState DriverState = "RUNNING" - DriverCompletedState DriverState = "COMPLETED" - DriverFailedState DriverState = "FAILED" - DriverUnknownState DriverState = "UNKNOWN" + DriverStatePending DriverState = "PENDING" + DriverStateRunning DriverState = "RUNNING" + DriverStateCompleted DriverState = "COMPLETED" + DriverStateFailed DriverState = "FAILED" + DriverStateUnknown DriverState = "UNKNOWN" ) // ExecutorState tells the current state of an executor. @@ -398,48 +370,13 @@ type ExecutorState string // Different states an executor may have. const ( - ExecutorPendingState ExecutorState = "PENDING" - ExecutorRunningState ExecutorState = "RUNNING" - ExecutorCompletedState ExecutorState = "COMPLETED" - ExecutorFailedState ExecutorState = "FAILED" - ExecutorUnknownState ExecutorState = "UNKNOWN" + ExecutorStatePending ExecutorState = "PENDING" + ExecutorStateRunning ExecutorState = "RUNNING" + ExecutorStateCompleted ExecutorState = "COMPLETED" + ExecutorStateFailed ExecutorState = "FAILED" + ExecutorStateUnknown ExecutorState = "UNKNOWN" ) -// SparkApplicationStatus describes the current status of a Spark application. -type SparkApplicationStatus struct { - // SparkApplicationID is set by the spark-distribution(via spark.app.id config) on the driver and executor pods - SparkApplicationID string `json:"sparkApplicationId,omitempty"` - // SubmissionID is a unique ID of the current submission of the application. - SubmissionID string `json:"submissionID,omitempty"` - // LastSubmissionAttemptTime is the time for the last application submission attempt. - // +nullable - LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"` - // CompletionTime is the time when the application runs to completion if it does. 
- // +nullable - TerminationTime metav1.Time `json:"terminationTime,omitempty"` - // DriverInfo has information about the driver. - DriverInfo DriverInfo `json:"driverInfo"` - // AppState tells the overall application state. - AppState ApplicationState `json:"applicationState,omitempty"` - // ExecutorState records the state of executors by executor Pod names. - ExecutorState map[string]ExecutorState `json:"executorState,omitempty"` - // ExecutionAttempts is the total number of attempts to run a submitted application to completion. - // Incremented upon each attempted run of the application and reset upon invalidation. - ExecutionAttempts int32 `json:"executionAttempts,omitempty"` - // SubmissionAttempts is the total number of attempts to submit an application to run. - // Incremented upon each attempted submission of the application and reset upon invalidation and rerun. - SubmissionAttempts int32 `json:"submissionAttempts,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SparkApplicationList carries a list of SparkApplication objects. -type SparkApplicationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []SparkApplication `json:"items,omitempty"` -} - // Dependencies specifies all possible types of dependencies of a Spark application. type Dependencies struct { // Jars is a list of JAR files the Spark application depends on. @@ -497,14 +434,14 @@ type SparkPodSpec struct { Secrets []SecretInfo `json:"secrets,omitempty"` // Env carries the environment variables to add to the pod. // +optional - Env []apiv1.EnvVar `json:"env,omitempty"` + Env []corev1.EnvVar `json:"env,omitempty"` // EnvVars carries the environment variables to add to the pod. // Deprecated. Consider using `env` instead. // +optional EnvVars map[string]string `json:"envVars,omitempty"` // EnvFrom is a list of sources to populate environment variables in the container. // +optional - EnvFrom []apiv1.EnvFromSource `json:"envFrom,omitempty"` + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` // EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. // Deprecated. Consider using `env` instead. // +optional @@ -517,28 +454,28 @@ type SparkPodSpec struct { Annotations map[string]string `json:"annotations,omitempty"` // VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem. // +optional - VolumeMounts []apiv1.VolumeMount `json:"volumeMounts,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` // Affinity specifies the affinity/anti-affinity settings for the pod. // +optional - Affinity *apiv1.Affinity `json:"affinity,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` // Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod. // +optional - Tolerations []apiv1.Toleration `json:"tolerations,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // PodSecurityContext specifies the PodSecurityContext to apply. // +optional - PodSecurityContext *apiv1.PodSecurityContext `json:"podSecurityContext,omitempty"` + PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` // SecurityContext specifies the container's SecurityContext to apply. 
 	// +optional
-	SecurityContext *apiv1.SecurityContext `json:"securityContext,omitempty"`
+	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
 	// SchedulerName specifies the scheduler that will be used for scheduling
 	// +optional
 	SchedulerName *string `json:"schedulerName,omitempty"`
 	// Sidecars is a list of sidecar containers that run alongside the main Spark container.
 	// +optional
-	Sidecars []apiv1.Container `json:"sidecars,omitempty"`
+	Sidecars []corev1.Container `json:"sidecars,omitempty"`
 	// InitContainers is a list of init-containers that run to completion before the main Spark container.
 	// +optional
-	InitContainers []apiv1.Container `json:"initContainers,omitempty"`
+	InitContainers []corev1.Container `json:"initContainers,omitempty"`
 	// HostNetwork indicates whether to request host networking for the pod or not.
 	// +optional
 	HostNetwork *bool `json:"hostNetwork,omitempty"`
@@ -548,7 +485,7 @@ type SparkPodSpec struct {
 	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
 	// DNSConfig specifies the DNS settings for the pod, following the Kubernetes specifications.
 	// +optional
-	DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty"`
+	DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
 	// TerminationGracePeriodSeconds is the termination grace period, in seconds, for the pod.
 	// +optional
 	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
@@ -557,7 +494,7 @@ type SparkPodSpec struct {
 	ServiceAccount *string `json:"serviceAccount,omitempty"`
 	// HostAliases settings for the pod, following the Kubernetes specifications.
 	// +optional
-	HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty"`
+	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
 	// ShareProcessNamespace settings for the pod, following the Kubernetes specifications.
 	// +optional
 	ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty"`
@@ -583,7 +520,7 @@ type DriverSpec struct {
 	JavaOptions *string `json:"javaOptions,omitempty"`
 	// Lifecycle for running preStop or postStart commands
 	// +optional
-	Lifecycle *apiv1.Lifecycle `json:"lifecycle,omitempty"`
+	Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"`
 	// KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and
 	// other Kubernetes resources. Defaults to https://kubernetes.default.svc.
 	// +optional
@@ -618,7 +555,7 @@ type ExecutorSpec struct {
 	JavaOptions *string `json:"javaOptions,omitempty"`
 	// Lifecycle for running preStop or postStart commands
 	// +optional
-	Lifecycle *apiv1.Lifecycle `json:"lifecycle,omitempty"`
+	Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"`
 	// DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination.
 	// Maps to `spark.kubernetes.executor.deleteOnTermination`, which is available since Spark 3.0.
 	// +optional
@@ -639,22 +576,22 @@ type SecretType string
 
 // An enumeration of secret types supported.
 const (
-	// GCPServiceAccountSecret is for secrets from a GCP service account Json key file that needs
+	// SecretTypeGCPServiceAccount is for secrets from a GCP service account JSON key file that needs
 	// the environment variable GOOGLE_APPLICATION_CREDENTIALS.
-	GCPServiceAccountSecret SecretType = "GCPServiceAccount"
-	// HadoopDelegationTokenSecret is for secrets from an Hadoop delegation token that needs the
+	SecretTypeGCPServiceAccount SecretType = "GCPServiceAccount"
+	// SecretTypeHadoopDelegationToken is for secrets from a Hadoop delegation token that needs the
 	// environment variable HADOOP_TOKEN_FILE_LOCATION.
-	HadoopDelegationTokenSecret SecretType = "HadoopDelegationToken"
-	// GenericType is for secrets that needs no special handling.
-	GenericType SecretType = "Generic"
+	SecretTypeHadoopDelegationToken SecretType = "HadoopDelegationToken"
+	// SecretTypeGeneric is for secrets that need no special handling.
+	SecretTypeGeneric SecretType = "Generic"
 )
 
 // DriverInfo captures information about the driver.
 type DriverInfo struct {
 	WebUIServiceName string `json:"webUIServiceName,omitempty"`
 	// UI Details for the UI created via ClusterIP service accessible from within the cluster.
-	WebUIPort int32 `json:"webUIPort,omitempty"`
 	WebUIAddress string `json:"webUIAddress,omitempty"`
+	WebUIPort int32 `json:"webUIPort,omitempty"`
 	// Ingress Details if an ingress for the UI was created.
 	WebUIIngressName string `json:"webUIIngressName,omitempty"`
 	WebUIIngressAddress string `json:"webUIIngressAddress,omitempty"`
@@ -752,39 +689,3 @@ type DynamicAllocation struct {
 	// +optional
 	ShuffleTrackingTimeout *int64 `json:"shuffleTrackingTimeout,omitempty"`
 }
-
-// PrometheusMonitoringEnabled returns if Prometheus monitoring is enabled or not.
-func (s *SparkApplication) PrometheusMonitoringEnabled() bool {
-	return s.Spec.Monitoring != nil && s.Spec.Monitoring.Prometheus != nil
-}
-
-// HasPrometheusConfigFile returns if Prometheus monitoring uses a configuration file in the container.
-func (s *SparkApplication) HasPrometheusConfigFile() bool {
-	return s.PrometheusMonitoringEnabled() &&
-		s.Spec.Monitoring.Prometheus.ConfigFile != nil &&
-		*s.Spec.Monitoring.Prometheus.ConfigFile != ""
-}
-
-// HasPrometheusConfig returns if Prometheus monitoring defines metricsProperties in the spec.
-func (s *SparkApplication) HasMetricsProperties() bool {
-	return s.PrometheusMonitoringEnabled() &&
-		s.Spec.Monitoring.MetricsProperties != nil &&
-		*s.Spec.Monitoring.MetricsProperties != ""
-}
-
-// HasPrometheusConfigFile returns if Monitoring defines metricsPropertiesFile in the spec.
-func (s *SparkApplication) HasMetricsPropertiesFile() bool {
-	return s.PrometheusMonitoringEnabled() &&
-		s.Spec.Monitoring.MetricsPropertiesFile != nil &&
-		*s.Spec.Monitoring.MetricsPropertiesFile != ""
-}
-
-// ExposeDriverMetrics returns if driver metrics should be exposed.
-func (s *SparkApplication) ExposeDriverMetrics() bool {
-	return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeDriverMetrics
-}
-
-// ExposeExecutorMetrics returns if executor metrics should be exposed.
-func (s *SparkApplication) ExposeExecutorMetrics() bool {
-	return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeExecutorMetrics
-}
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go
similarity index 97%
rename from pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go
rename to api/v1beta2/zz_generated.deepcopy.go
index 0b15feb0a..c369db9e0 100644
--- a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go
+++ b/api/v1beta2/zz_generated.deepcopy.go
@@ -1,10 +1,7 @@
 //go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-// Code generated by k8s code-generator DO NOT EDIT.
/* -Copyright 2018 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +16,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1beta2 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -32,7 +29,6 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ApplicationState) DeepCopyInto(out *ApplicationState) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState. @@ -65,7 +61,6 @@ func (in *BatchSchedulerConfiguration) DeepCopyInto(out *BatchSchedulerConfigura (*out)[key] = val.DeepCopy() } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchSchedulerConfiguration. @@ -111,7 +106,6 @@ func (in *Dependencies) DeepCopyInto(out *Dependencies) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies. @@ -127,7 +121,6 @@ func (in *Dependencies) DeepCopy() *Dependencies { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DriverInfo) DeepCopyInto(out *DriverInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo. @@ -140,6 +133,64 @@ func (in *DriverInfo) DeepCopy() *DriverInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfiguration) { + *out = *in + if in.ServicePort != nil { + in, out := &in.ServicePort, &out.ServicePort + *out = new(int32) + **out = **in + } + if in.ServicePortName != nil { + in, out := &in.ServicePortName, &out.ServicePortName + *out = new(string) + **out = **in + } + if in.ServiceType != nil { + in, out := &in.ServiceType, &out.ServiceType + *out = new(v1.ServiceType) + **out = **in + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceLabels != nil { + in, out := &in.ServiceLabels, &out.ServiceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressAnnotations != nil { + in, out := &in.IngressAnnotations, &out.IngressAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressTLS != nil { + in, out := &in.IngressTLS, &out.IngressTLS + *out = make([]networkingv1.IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverIngressConfiguration. 
+func (in *DriverIngressConfiguration) DeepCopy() *DriverIngressConfiguration { + if in == nil { + return nil + } + out := new(DriverIngressConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { *out = *in @@ -176,12 +227,18 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { (*out)[key] = val } } + if in.ServiceLabels != nil { + in, out := &in.ServiceLabels, &out.ServiceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]Port, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec. @@ -217,7 +274,6 @@ func (in *DynamicAllocation) DeepCopyInto(out *DynamicAllocation) { *out = new(int64) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicAllocation. @@ -249,6 +305,11 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { *out = new(string) **out = **in } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } if in.DeleteOnTermination != nil { in, out := &in.DeleteOnTermination, &out.DeleteOnTermination *out = new(bool) @@ -259,7 +320,6 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { *out = make([]Port, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec. @@ -275,7 +335,6 @@ func (in *ExecutorSpec) DeepCopy() *ExecutorSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GPUSpec) DeepCopyInto(out *GPUSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec. @@ -306,7 +365,6 @@ func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { *out = new(PrometheusSpec) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. @@ -322,7 +380,6 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameKey) DeepCopyInto(out *NameKey) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey. @@ -338,7 +395,6 @@ func (in *NameKey) DeepCopy() *NameKey { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamePath) DeepCopyInto(out *NamePath) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath. @@ -354,7 +410,6 @@ func (in *NamePath) DeepCopy() *NamePath { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Port) DeepCopyInto(out *Port) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. 
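
As a quick orientation between the generated hunks: the `DynamicAllocation` and `ExecutorSpec` types whose deep-copy helpers appear above are driven from the application spec. Below is a minimal sketch of the corresponding manifest fragment; only the `dynamicAllocation`, `shuffleTrackingTimeout`, and executor `lifecycle` tags are visible in this patch, so every other field name here is recalled from the v1beta2 API and should be read as an assumption, not as something this change introduces.

```yaml
# Sketch of a SparkApplication spec fragment; fields marked "assumed" are
# not shown in this patch and are recalled from the v1beta2 API.
spec:
  dynamicAllocation:              # json tag shown in this patch
    enabled: true                 # assumed field name
    initialExecutors: 2           # assumed field name
    minExecutors: 1               # assumed field name
    maxExecutors: 10              # assumed field name
    shuffleTrackingTimeout: 60    # json tag shown in this patch
  executor:
    lifecycle:                    # corev1.Lifecycle, per the ExecutorSpec type above
      preStop:
        exec:
          command: ["/bin/sh", "-c", "sleep 5"]   # give shuffle data time to drain
```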
@@ -390,7 +445,6 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. @@ -426,7 +480,6 @@ func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) { *out = new(int64) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy. @@ -446,7 +499,6 @@ func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication. @@ -479,7 +531,6 @@ func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplica (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList. @@ -519,7 +570,6 @@ func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplica *out = new(int32) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec. @@ -547,7 +597,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkAppli *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus. @@ -563,7 +612,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplication // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretInfo) DeepCopyInto(out *SecretInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo. @@ -583,7 +631,6 @@ func (in *SparkApplication) DeepCopyInto(out *SparkApplication) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication. @@ -616,7 +663,6 @@ func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList. @@ -774,7 +820,6 @@ func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) { *out = new(DynamicAllocation) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec. @@ -801,7 +846,6 @@ func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) { (*out)[key] = val } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus. @@ -986,7 +1030,6 @@ func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) { *out = new(bool) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec. 
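
Likewise, the `RestartPolicy` type handled in the hunks above ends up under the application spec. A minimal, hedged sketch of a manifest using it follows; the name, image, and main-class values are hypothetical placeholders, and the `mode`, `mainClass`, `mainApplicationFile`, and top-level `restartPolicy` json tags are recalled from the v1beta2 API rather than shown in this patch. The nested `restartPolicy` keys match the tags defined earlier in this change.

```yaml
apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi                            # hypothetical name
spec:
  type: Scala                               # one of Java, Python, Scala, R
  mode: cluster                             # assumed json tag for DeployMode
  image: spark:3.5.0                        # hypothetical image
  mainClass: org.apache.spark.examples.SparkPi                              # assumed tag
  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar  # assumed tag
  restartPolicy:
    type: OnFailure                         # Never, Always, or OnFailure
    onFailureRetries: 3
    onFailureRetryInterval: 10              # seconds, minimum 1
    onSubmissionFailureRetries: 5
    onSubmissionFailureRetryInterval: 20    # seconds, minimum 1
```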
@@ -1024,59 +1067,13 @@ func (in *SparkUIConfiguration) DeepCopyInto(out *SparkUIConfiguration) { (*out)[key] = val } } - if in.IngressAnnotations != nil { - in, out := &in.IngressAnnotations, &out.IngressAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.IngressTLS != nil { - in, out := &in.IngressTLS, &out.IngressTLS - *out = make([]networkingv1.IngressTLS, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkUIConfiguration. -func (in *SparkUIConfiguration) DeepCopy() *SparkUIConfiguration { - if in == nil { - return nil - } - out := new(SparkUIConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfiguration) { - *out = *in - if in.ServicePort != nil { - in, out := &in.ServicePort, &out.ServicePort - *out = new(int32) - **out = **in - } - if in.ServicePortName != nil { - in, out := &in.ServicePortName, &out.ServicePortName - *out = new(string) - **out = **in - } - if in.ServiceType != nil { - in, out := &in.ServiceType, &out.ServiceType - *out = new(v1.ServiceType) - **out = **in - } - if in.ServiceAnnotations != nil { - in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + if in.ServiceLabels != nil { + in, out := &in.ServiceLabels, &out.ServiceLabels *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } - out.IngressURLFormat = in.IngressURLFormat if in.IngressAnnotations != nil { in, out := &in.IngressAnnotations, &out.IngressAnnotations *out = make(map[string]string, len(*in)) @@ -1091,15 +1088,14 @@ func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfigurati (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverIngressConfiguration. -func (in *DriverIngressConfiguration) DeepCopy() *DriverIngressConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkUIConfiguration. +func (in *SparkUIConfiguration) DeepCopy() *SparkUIConfiguration { if in == nil { return nil } - out := new(DriverIngressConfiguration) + out := new(SparkUIConfiguration) in.DeepCopyInto(out) return out -} \ No newline at end of file +} diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index b735a995c..b36932321 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,11 +1,39 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
 apiVersion: v2
+
 name: spark-operator
-description: A Helm chart for Spark on Kubernetes operator
-version: 1.4.3
-appVersion: v1beta2-1.6.1-3.5.0
+
+description: A Helm chart for Spark on Kubernetes operator.
+
+version: 2.0.0-rc.0
+
+appVersion: 2.0.0-rc.0
+
 keywords:
-  - spark
+- apache spark
+- big data
+
 home: https://github.com/kubeflow/spark-operator
+
 maintainers:
-  - name: yuchaoran2011
-    email: yuchaoran2011@gmail.com
+- name: yuchaoran2011
+  email: yuchaoran2011@gmail.com
+  url: https://github.com/yuchaoran2011
+- name: ChenYi015
+  email: github@chenyicn.net
+  url: https://github.com/ChenYi015
diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md
index a5a9c1bdc..d07b315b3 100644
--- a/charts/spark-operator-chart/README.md
+++ b/charts/spark-operator-chart/README.md
@@ -1,8 +1,8 @@
 # spark-operator
 
-![Version: 1.4.2](https://img.shields.io/badge/Version-1.4.2-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square)
+![Version: 2.0.0-rc.0](https://img.shields.io/badge/Version-2.0.0--rc.0-informational?style=flat-square) ![AppVersion: 2.0.0-rc.0](https://img.shields.io/badge/AppVersion-2.0.0--rc.0-informational?style=flat-square)
 
-A Helm chart for Spark on Kubernetes operator
+A Helm chart for Spark on Kubernetes operator.
 
 **Homepage:** <https://github.com/kubeflow/spark-operator>
 
@@ -41,13 +41,7 @@ See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation.
 helm install [RELEASE_NAME] spark-operator/spark-operator
 ```
 
-For example, if you want to create a release with name `spark-operator` in the `default` namespace:
-
-```shell
-helm install spark-operator spark-operator/spark-operator
-```
-
-Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command.
+For example, if you want to create a release with name `spark-operator` in the `spark-operator` namespace:
 
 ```shell
 helm install spark-operator spark-operator/spark-operator \
@@ -55,6 +49,8 @@ helm install spark-operator spark-operator/spark-operator \
 --namespace spark-operator \
 --create-namespace
 ```
 
+Note that by passing the `--create-namespace` flag to the `helm install` command, `helm` will create the release namespace if it does not exist.
+
 See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation.
 
 ### Upgrade the chart
@@ -77,72 +73,93 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
 
 ## Values
 
-| Key | Type | Default | Description |
-|-------------------------------------------|--------|------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| affinity | object | `{}` | Affinity for pod assignment |
-| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. 
If enabled, users can specify batch scheduler name in spark application | -| commonLabels | object | `{}` | Common labels to add to the resources | -| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage | -| envFrom | list | `[]` | Pod environment variable sources | -| fullnameOverride | string | `""` | String to override release name | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository | -| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. | -| imagePullSecrets | list | `[]` | Image pull secrets | -| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. | -| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate | -| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. | -| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. | -| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace | -| logLevel | int | `2` | Set higher levels for more verbose logging | -| metrics.enable | bool | `true` | Enable prometheus metric scraping | -| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint | -| metrics.port | int | `10254` | Metrics port | -| metrics.portName | string | `"metrics"` | Metrics port name | -| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics | -| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) | -| nodeSelector | object | `{}` | Node labels for pod assignment | -| podDisruptionBudget.enabled | bool | `false` | Whether to deploy a PodDisruptionBudget | -| podDisruptionBudget.minAvailable | int | `1` | An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction | -| podAnnotations | object | `{}` | Additional annotations to add to the pod | -| podLabels | object | `{}` | Additional labels to add to the pod | -| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. | -| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. | -| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from | -| podMonitor.labels | object | `{}` | Pod monitor labels | -| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port | -| podSecurityContext | object | `{}` | Pod security context | -| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. 
| -| rbac.annotations | object | `{}` | Optional annotations for rbac | -| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | -| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | -| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources | -| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 | -| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. | -| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. | -| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting | -| securityContext | object | `{}` | Operator container security context | -| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account | -| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps | -| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account | -| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account | -| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator | -| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account | -| sidecars | list | `[]` | Sidecar containers | -| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs | -| tolerations | list | `[]` | List of node taints to tolerate | -| uiService.enable | bool | `true` | Enable UI service creation for Spark application | -| volumeMounts | list | `[]` | | -| volumes | list | `[]` | | -| webhook.enable | bool | `false` | Enable webhook server | -| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | -| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects | -| webhook.port | int | `8080` | Webhook service port | -| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name | -| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| nameOverride | string | `""` | String to partially override release name. | +| fullnameOverride | string | `""` | String to fully override release name. | +| commonLabels | object | `{}` | Common labels to add to the resources. 
|
+| image.registry | string | `"docker.io"` | Image registry. |
+| image.repository | string | `"kubeflow/spark-operator"` | Image repository. |
+| image.tag | string | If not set, the chart appVersion will be used. | Image tag. |
+| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
+| image.pullSecrets | list | `[]` | Image pull secrets for private image registry. |
+| controller.replicas | int | `1` | Number of replicas of controller. |
+| controller.workers | int | `10` | Reconcile concurrency; higher values might increase memory usage. |
+| controller.logLevel | string | `"info"` | Configures the verbosity of logging; can be one of `debug`, `info`, `error`. |
+| controller.uiService.enable | bool | `true` | Specifies whether to create a service for the Spark web UI. |
+| controller.uiIngress.enable | bool | `false` | Specifies whether to create an ingress for the Spark web UI. `controller.uiService.enable` must be `true` to enable ingress. |
+| controller.uiIngress.urlFormat | string | `""` | Ingress URL format. Required if `controller.uiIngress.enable` is true. |
+| controller.batchScheduler.enable | bool | `false` | Specifies whether to enable the batch scheduler for Spark job scheduling. If enabled, users can specify the batch scheduler name in the Spark application. |
+| controller.serviceAccount.create | bool | `true` | Specifies whether to create a service account for the controller. |
+| controller.serviceAccount.name | string | `""` | Optional name for the controller service account. |
+| controller.serviceAccount.annotations | object | `{}` | Extra annotations for the controller service account. |
+| controller.rbac.create | bool | `true` | Specifies whether to create RBAC resources for the controller. |
+| controller.rbac.annotations | object | `{}` | Extra annotations for the controller RBAC resources. |
+| controller.labels | object | `{}` | Extra labels for controller pods. |
+| controller.annotations | object | `{}` | Extra annotations for controller pods. |
+| controller.volumes | list | `[]` | Volumes for controller pods. |
+| controller.nodeSelector | object | `{}` | Node selector for controller pods. |
+| controller.affinity | object | `{}` | Affinity for controller pods. |
+| controller.tolerations | list | `[]` | List of node taints to tolerate for controller pods. |
+| controller.priorityClassName | string | `""` | Priority class for controller pods. |
+| controller.podSecurityContext | object | `{}` | Security context for controller pods. |
+| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). The labelSelector field in topology spread constraint will be set to the selector labels for controller pods if not specified. |
+| controller.env | list | `[]` | Environment variables for controller containers. |
+| controller.envFrom | list | `[]` | Environment variable sources for controller containers. |
+| controller.volumeMounts | list | `[]` | Volume mounts for controller containers. |
+| controller.resources | object | `{}` | Pod resource requests and limits for controller containers. Note that each job submission will spawn a JVM within the controller pods using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. |
+| controller.securityContext | object | `{}` | Security context for controller containers. |
+| controller.sidecars | list | `[]` | Sidecar containers for controller pods. |
+| controller.podDisruptionBudget.enable | bool | `false` | Specifies whether to create a pod disruption budget for the controller. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) |
+| controller.podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Requires `controller.replicas` to be greater than 1. |
+| webhook.replicas | int | `1` | Number of replicas of webhook server. |
+| webhook.logLevel | string | `"info"` | Configures the verbosity of logging; can be one of `debug`, `info`, `error`. |
+| webhook.port | int | `9443` | Specifies webhook port. |
+| webhook.portName | string | `"webhook"` | Specifies webhook service port name. |
+| webhook.failurePolicy | string | `"Fail"` | Specifies how unrecognized errors are handled. Available options are `Ignore` or `Fail`. |
+| webhook.timeoutSeconds | int | `10` | Specifies the webhook timeout in seconds; the value must be between 1 and 30. |
+| webhook.resourceQuotaEnforcement.enable | bool | `false` | Specifies whether to enable the ResourceQuota enforcement for SparkApplication resources. |
+| webhook.serviceAccount.create | bool | `true` | Specifies whether to create a service account for the webhook. |
+| webhook.serviceAccount.name | string | `""` | Optional name for the webhook service account. |
+| webhook.serviceAccount.annotations | object | `{}` | Extra annotations for the webhook service account. |
+| webhook.rbac.create | bool | `true` | Specifies whether to create RBAC resources for the webhook. |
+| webhook.rbac.annotations | object | `{}` | Extra annotations for the webhook RBAC resources. |
+| webhook.labels | object | `{}` | Extra labels for webhook pods. |
+| webhook.annotations | object | `{}` | Extra annotations for webhook pods. |
+| webhook.sidecars | list | `[]` | Sidecar containers for webhook pods. |
+| webhook.volumes | list | `[]` | Volumes for webhook pods. |
+| webhook.nodeSelector | object | `{}` | Node selector for webhook pods. |
+| webhook.affinity | object | `{}` | Affinity for webhook pods. |
+| webhook.tolerations | list | `[]` | List of node taints to tolerate for webhook pods. |
+| webhook.priorityClassName | string | `""` | Priority class for webhook pods. |
+| webhook.podSecurityContext | object | `{}` | Security context for webhook pods. |
+| webhook.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). The labelSelector field in topology spread constraint will be set to the selector labels for webhook pods if not specified. |
+| webhook.env | list | `[]` | Environment variables for webhook containers. |
+| webhook.envFrom | list | `[]` | Environment variable sources for webhook containers. |
+| webhook.volumeMounts | list | `[]` | Volume mounts for webhook containers. |
+| webhook.resources | object | `{}` | Pod resource requests and limits for webhook pods. |
+| webhook.securityContext | object | `{}` | Security context for webhook containers. |
+| webhook.podDisruptionBudget.enable | bool | `false` | Specifies whether to create a pod disruption budget for the webhook. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) |
+| webhook.podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Requires `webhook.replicas` to be greater than 1. |
+| spark.jobNamespaces | list | `["default"]` | List of namespaces in which to run Spark jobs. If an empty string is included, all namespaces are allowed. Make sure the namespaces already exist. |
+| spark.serviceAccount.create | bool | `true` | Specifies whether to create a service account for spark applications. |
+| spark.serviceAccount.name | string | `""` | Optional name for the spark service account. |
+| spark.serviceAccount.annotations | object | `{}` | Optional annotations for the spark service account. |
+| spark.rbac.create | bool | `true` | Specifies whether to create RBAC resources for spark applications. |
+| spark.rbac.annotations | object | `{}` | Optional annotations for the spark application RBAC resources. |
+| prometheus.metrics.enable | bool | `true` | Specifies whether to enable Prometheus metrics scraping. |
+| prometheus.metrics.port | int | `8080` | Metrics port. |
+| prometheus.metrics.portName | string | `"metrics"` | Metrics port name. |
+| prometheus.metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint. |
+| prometheus.metrics.prefix | string | `""` | Metrics prefix; it will be added to all exported metrics. |
+| prometheus.podMonitor.create | bool | `false` | Specifies whether to create a pod monitor. Note that Prometheus metrics should be enabled as well. |
+| prometheus.podMonitor.labels | object | `{}` | Pod monitor labels. |
+| prometheus.podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from. |
+| prometheus.podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port. |
 
 ## Maintainers
 
 | Name | Email | Url |
 | ---- | ------ | --- |
-| yuchaoran2011 | <yuchaoran2011@gmail.com> | |
+| yuchaoran2011 | <yuchaoran2011@gmail.com> | <https://github.com/yuchaoran2011> |
+| ChenYi015 | <github@chenyicn.net> | <https://github.com/ChenYi015> |
diff --git a/charts/spark-operator-chart/README.md.gotmpl b/charts/spark-operator-chart/README.md.gotmpl
index a20ed517e..0c94c12d0 100644
--- a/charts/spark-operator-chart/README.md.gotmpl
+++ b/charts/spark-operator-chart/README.md.gotmpl
@@ -43,13 +43,7 @@ See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation.
 helm install [RELEASE_NAME] spark-operator/spark-operator
 ```
 
-For example, if you want to create a release with name `spark-operator` in the `default` namespace:
-
-```shell
-helm install spark-operator spark-operator/spark-operator
-```
-
-Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command.
+For example, if you want to create a release with name `spark-operator` in the `spark-operator` namespace:
 
 ```shell
 helm install spark-operator spark-operator/spark-operator \
@@ -57,6 +51,8 @@ helm install spark-operator spark-operator/spark-operator \
 --namespace spark-operator \
 --create-namespace
 ```
 
+Note that by passing the `--create-namespace` flag to the `helm install` command, `helm` will create the release namespace if it does not exist.
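
To go beyond the defaults, the documented values can be collected in a file and passed to `helm install` with helm's standard `-f` flag. Below is a minimal sketch built only from keys in the values table above; the extra job namespace is a hypothetical example and must already exist.

```yaml
# values.yaml -- every key below comes from the chart's values table.
controller:
  replicas: 2              # two controller replicas
  logLevel: info           # one of debug, info, error
  podDisruptionBudget:
    enable: true           # requires controller.replicas to be greater than 1
    minAvailable: 1
webhook:
  replicas: 2
  failurePolicy: Fail      # Ignore or Fail
spark:
  jobNamespaces:
  - default
  - spark-jobs             # hypothetical namespace; must already exist
prometheus:
  metrics:
    enable: true
    portName: metrics
```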
+ See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation. ### Upgrade the chart diff --git a/charts/spark-operator-chart/ci/ci-values.yaml b/charts/spark-operator-chart/ci/ci-values.yaml index 13d37731c..23b5e1e36 100644 --- a/charts/spark-operator-chart/ci/ci-values.yaml +++ b/charts/spark-operator-chart/ci/ci-values.yaml @@ -1,2 +1,2 @@ image: - tag: "local" + tag: local diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 60e836b08..7f77e1bb9 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -1,11 +1,10 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: (unknown) api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 + controller-gen.kubebuilder.io/version: v0.15.0 name: scheduledsparkapplications.sparkoperator.k8s.io spec: group: sparkoperator.k8s.io @@ -18,1104 +17,1869 @@ spec: singular: scheduledsparkapplication scope: Namespaced versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .spec.suspend - name: Suspend - type: boolean - - jsonPath: .status.lastRun - name: Last Run - type: date - - jsonPath: .status.lastRunName - name: Last Run Name - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - concurrencyPolicy: - type: string - failedRunHistoryLimit: - format: int32 - type: integer - schedule: - type: string - successfulRunHistoryLimit: - format: int32 - type: integer - suspend: - type: boolean - template: - properties: - arguments: - items: - type: string - type: array - batchScheduler: + - additionalPrinterColumns: + - jsonPath: .spec.schedule + name: Schedule + type: string + - jsonPath: .spec.suspend + name: Suspend + type: string + - jsonPath: .status.lastRun + name: Last Run + type: date + - jsonPath: .status.lastRunName + name: Last Run Name + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ScheduledSparkApplication is the Schema for the scheduledsparkapplications + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScheduledSparkApplicationSpec defines the desired state of + ScheduledSparkApplication. 
+ properties: + concurrencyPolicy: + description: ConcurrencyPolicy is the policy governing concurrent + SparkApplication runs. + type: string + failedRunHistoryLimit: + description: |- + FailedRunHistoryLimit is the number of past failed runs of the application to keep. + Defaults to 1. + format: int32 + type: integer + schedule: + description: Schedule is a cron schedule on which the application + should run. + type: string + successfulRunHistoryLimit: + description: |- + SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + Defaults to 1. + format: int32 + type: integer + suspend: + description: |- + Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + Defaults to false. + type: boolean + template: + description: Template is a template from which SparkApplication instances + can be created. + properties: + arguments: + description: Arguments is a list of arguments to be passed to + the application. + items: type: string - batchSchedulerOptions: - properties: - priorityClassName: + type: array + batchScheduler: + description: BatchScheduler configures which batch scheduler will + be used for scheduling + type: string + batchSchedulerOptions: + description: BatchSchedulerOptions provides fine-grained control + on how to batch scheduling. + properties: + priorityClassName: + description: PriorityClassName stands for the name of k8s + PriorityClass resource, it's being used in Volcano batch + scheduler. + type: string + queue: + description: Queue stands for the resource queue which the + application belongs to, it's being used in Volcano batch + scheduler. + type: string + resources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. + If specified, volcano scheduler will consider it as the resources requested. + type: object + type: object + deps: + description: Deps captures all possible types of dependencies + of a Spark application. + properties: + excludePackages: + description: |- + ExcludePackages is a list of "groupId:artifactId", to exclude while resolving the + dependencies provided in Packages to avoid dependency conflicts. + items: type: string - queue: + type: array + files: + description: Files is a list of files the Spark application + depends on. 
+ items: type: string - resources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - deps: - properties: - excludePackages: - items: - type: string - type: array - files: - items: - type: string - type: array - jars: - items: - type: string - type: array - packages: - items: - type: string - type: array - pyFiles: - items: - type: string - type: array - repositories: - items: - type: string - type: array - type: object - driver: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: + type: array + jars: + description: Jars is a list of JAR files the Spark application + depends on. + items: + type: string + type: array + packages: + description: |- + Packages is a list of maven coordinates of jars to include on the driver and executor + classpaths. This will search the local maven repo, then maven central and any additional + remote repositories given by the "repositories" option. + Each package should be of the form "groupId:artifactId:version". + items: + type: string + type: array + pyFiles: + description: PyFiles is a list of Python files the Spark application + depends on. + items: + type: string + type: array + repositories: + description: |- + Repositories is a list of additional remote repositories to search for the maven coordinate + given with the "packages" option. + items: + type: string + type: array + type: object + driver: + description: Driver is the driver specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity + settings for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
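#
# The Deps fields above, sketched; every coordinate and URL is hypothetical:
#
#   spec:
#     template:
#       deps:
#         jars:
#           - local:///opt/spark/extra/udfs.jar
#         pyFiles:
#           - local:///opt/spark/python/helpers.py
#         packages:
#           - org.apache.spark:spark-avro_2.12:3.5.1   # groupId:artifactId:version
#         excludePackages:
#           - com.google.guava:guava                   # groupId:artifactId
#         repositories:
#           - https://repo.example.com/maven           # extra remote repo
#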
+ type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - operator: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: type: string - operator: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. 
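#
# The affinity block above is the stock Kubernetes NodeAffinity schema. As a
# sketch, a driver required to land on amd64 nodes while preferring a
# hypothetical node-pool label:
#
#   spec:
#     template:
#       driver:
#         affinity:
#           nodeAffinity:
#             requiredDuringSchedulingIgnoredDuringExecution:
#               nodeSelectorTerms:
#                 - matchExpressions:
#                     - key: kubernetes.io/arch
#                       operator: In
#                       values: ["amd64"]
#             preferredDuringSchedulingIgnoredDuringExecution:
#               - weight: 50                           # range 1-100, summed per node
#                 preference:
#                   matchExpressions:
#                     - key: pool.example.com/spark    # hypothetical label
#                       operator: Exists
#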
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - type: object - type: array + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer required: - - nodeSelectorTerms + - podAffinityTerm + - weight type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: - matchExpressions: + key: + description: key is the label key + that the selector applies to. 
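#
# Preferred pod affinity per the schema above, sketched: try to co-locate the
# driver on the same node as pods carrying a hypothetical cache label:
#
#   spec:
#     template:
#       driver:
#         affinity:
#           podAffinity:
#             preferredDuringSchedulingIgnoredDuringExecution:
#               - weight: 100
#                 podAffinityTerm:
#                   topologyKey: kubernetes.io/hostname   # "co-located" = same node
#                   labelSelector:
#                     matchLabels:
#                       app.example.com/cache: "true"     # hypothetical pod label
#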
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - type: object + type: array + required: + - key + - operator type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: type: string - values: - items: + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. 
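#
# The namespaceSelector/namespaces pair above widens an affinity term beyond
# the pod's own namespace (leaving both unset means "this pod's namespace").
# A required-variant sketch; all labels are hypothetical:
#
#   podAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       - topologyKey: topology.kubernetes.io/zone   # "co-located" = same zone
#         namespaceSelector:
#           matchLabels:
#             team.example.com/data: "true"
#         labelSelector:
#           matchExpressions:
#             - key: app
#               operator: In
#               values: ["feature-store"]
#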
+ If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: - matchExpressions: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - type: object + type: array + required: + - key + - operator type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string + type: array + required: + - key + - operator type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
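#
# Anti-affinity reuses the same term schema with the preference inverted. A
# sketch that spreads one application's executors across nodes; the label
# key/value is hypothetical (in practice you would match labels the operator
# stamps on its pods):
#
#   spec:
#     template:
#       executor:
#         affinity:
#           podAntiAffinity:
#             preferredDuringSchedulingIgnoredDuringExecution:
#               - weight: 100
#                 podAffinityTerm:
#                   topologyKey: kubernetes.io/hostname
#                   labelSelector:
#                     matchLabels:
#                       app.example.com/spark-app: pi   # hypothetical label
#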
+ items: type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array type: object - type: array - coreLimit: - type: string - coreRequest: + type: object + annotations: + additionalProperties: type: string - cores: - format: int32 - minimum: 1 - type: integer - dnsConfig: + description: Annotations are the Kubernetes annotations to + be added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to + which the named objects should be mounted to. properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - searches: - items: - type: string - type: array + name: + type: string + path: + type: string + required: + - name + - path type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the driver. + Maps to `spark.kubernetes.driver.request.cores` that is available since Spark 3.0. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. 
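#
# The driver-level pod fields above, sketched with hypothetical values:
#
#   spec:
#     template:
#       driver:
#         cores: 1                   # spark.driver.cores; integer, minimum 1
#         coreRequest: 500m          # spark.kubernetes.driver.request.cores (Spark 3.0+)
#         coreLimit: 1200m           # hard CPU limit on the pod
#         annotations:
#           example.com/owner: data-platform    # hypothetical annotation
#         configMaps:
#           - name: spark-extra-config          # hypothetical ConfigMap
#             path: /etc/spark/extra            # mount path in the pod
#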
+ format: int32 + minimum: 1 + type: integer + dnsConfig: + description: DnsConfig dns settings for the pod, following + the Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. type: string - type: array - command: - items: + value: type: string - type: array - env: - items: + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add + to the pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: + key: + description: The key to select. + type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string - value: + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. 
type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object required: - - name + - fieldPath type: object - type: array - envFrom: - items: + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: + containerName: + description: 'Container name: required for volumes, + optional for env vars' type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource type: object - type: array - image: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. 
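#
# Environment and DNS wiring per the fields above; every referenced object
# name is hypothetical:
#
#   spec:
#     template:
#       driver:
#         env:
#           - name: LOG_LEVEL
#             value: INFO
#           - name: DB_PASSWORD
#             valueFrom:
#               secretKeyRef:
#                 name: db-credentials     # hypothetical Secret
#                 key: password
#         envFrom:
#           - prefix: APP_                 # ConfigMap keys are injected as APP_<key>
#             configMapRef:
#               name: spark-env            # hypothetical ConfigMap
#         dnsConfig:
#           nameservers: ["10.0.0.53"]
#           searches: ["svc.cluster.local"]
#           options:
#             - name: ndots
#               value: "2"
#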
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request + for driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the + Kubernetes specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host + networking for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides + Spec.Image if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - imagePullPolicy: + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. 
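#
# The remaining driver fields above, sketched; note the schema itself marks
# envVars and envSecretKeyRefs as deprecated in favor of env. The init
# container image and command are hypothetical:
#
#   spec:
#     template:
#       driver:
#         gpu:
#           name: nvidia.com/gpu           # GPU resource name
#           quantity: 1
#         hostNetwork: false
#         hostAliases:
#           - ip: 10.0.0.10                # hypothetical hosts-file entry
#             hostnames: ["metastore.internal"]
#         initContainers:
#           - name: warm-up                # hypothetical init container
#             image: busybox:1.36
#             command: ["sh", "-c", "echo preparing volumes"]
#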
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - lifecycle: + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. properties: - postStart: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: - exec: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: + key: + description: The key to select. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean required: - - port + - key type: object - tcpSocket: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: - host: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array + - fieldPath type: object - httpGet: + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: + containerName: + description: 'Container name: required + for volumes, optional for env vars' type: string - port: + divisor: anyOf: - type: integer - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - scheme: + resource: + description: 'Required: resource to select' type: string required: - - port + - resource type: object - tcpSocket: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace properties: - host: + key: + description: The key of the secret to + select from. Must be a valid secret + key. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean required: - - port + - key type: object + x-kubernetes-map-type: atomic type: object + required: + - name type: object - livenessProbe: + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps properties: - exec: + configMapRef: + description: The ConfigMap to select from properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string - required: - - port + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - host: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port + optional: + description: Specify whether the Secret must + be defined + type: boolean type: object - timeoutSeconds: - format: int32 - type: integer + x-kubernetes-map-type: atomic type: object - name: - type: string - ports: - items: + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. type: string - type: array - drop: - items: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - volumeMounts: - items: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
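# Illustrative usage (a sketch, not emitted by controller-gen): the `sleep`
# lifecycle handler newly present in this schema delays container shutdown
# without invoking a shell; the 30-second value is an arbitrary example.
#
#   spec:
#     driver:
#       lifecycle:
#         preStop:
#           sleep:
#             seconds: 30   # allow in-flight work to drain before termination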
+ type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - javaOptions: - type: string - kubernetesMaster: - type: string - labels: - additionalProperties: - type: string - type: object - lifecycle: - properties: - postStart: + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: + description: Exec specifies the action to take. properties: command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object httpGet: + description: HTTPGet specifies the http request + to perform. properties: host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: + description: The header field value type: string required: - name @@ -1123,49 +1887,212 @@ spec: type: object type: array path: + description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - preStop: - properties: + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: exec: + description: Exec specifies the action to take. properties: command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object httpGet: + description: HTTPGet specifies the http request + to perform. 
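# Illustrative usage (a sketch; the sidecar name, image, and port are
# hypothetical): the probe fields above are the standard Kubernetes Probe
# type, so a sidecar declared on the driver can be health-checked like any
# other container:
#
#   spec:
#     driver:
#       sidecars:
#         - name: metrics-exporter
#           image: registry.example.com/exporter:1.0
#           ports:
#             - containerPort: 9100
#               protocol: TCP
#           readinessProbe:
#             httpGet:
#               path: /healthz
#               port: 9100
#             periodSeconds: 10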
properties: host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: + description: The header field value type: string required: - name @@ -1173,3324 +2100,9515 @@ spec: type: object type: array path: + description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podName: - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' - type: string - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: + resizePolicy: + description: Resources resize policy for the container. items: + description: ContainerResizePolicy represents resource + resize policy for the container. properties: - name: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string - value: + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. type: string required: - - name - - value + - resourceName + - restartPolicy type: object type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: - add: - items: - type: string - type: array - drop: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: - type: string + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object type: object - privileged: - type: boolean - procMount: + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - serviceAccount: - type: string - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - shareProcessNamespace: - type: boolean - sidecars: - items: - properties: - args: - items: - type: string - type: array - command: - items: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string - type: array - env: - items: + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - name: + level: + description: Level is SELinux level label that + applies to the container. type: string - value: + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object required: - - name + - type type: object - type: array - envFrom: - items: + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
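# Illustrative usage (a sketch, not part of the generated schema): a hardened
# container-level securityContext for the driver, combining the capabilities
# and seccompProfile fields documented above:
#
#   spec:
#     driver:
#       securityContext:
#         runAsNonRoot: true
#         allowPrivilegeEscalation: false
#         capabilities:
#           drop:
#             - ALL
#         seccompProfile:
#           type: RuntimeDefault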
type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
type: string - required: - - port - type: object - tcpSocket: - properties: - host: + value: + description: The header field value type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array + - name + - value type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: + type: array + path: + description: Path to access on the HTTP server. 
type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. 
+ Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name type: object - securityContext: + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. 
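# Illustrative usage (a sketch; the volume name and mount path are
# hypothetical): volumeMounts declared on the driver refer to volumes defined
# at the SparkApplication level:
#
#   spec:
#     volumes:
#       - name: spark-work-dir
#         emptyDir: {}
#     driver:
#       volumeMounts:
#         - name: spark-work-dir
#           mountPath: /tmp/spark-work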
properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean - capabilities: - properties: - add: - items: + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the driver. For instance, + GC settings or other logging. + type: string + kubernetesMaster: + description: |- + KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and + other Kubernetes resources. Default to https://kubernetes.default.svc. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added + to the pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: array - drop: - items: + value: + description: The header field value type: string - type: array - type: object - privileged: - type: boolean - procMount: + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. format: int64 type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + required: + - seconds type: object - startupProbe: + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: - exec: - properties: - command: - items: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 type: integer + required: + - seconds type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for + the pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory + to allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. 
+ This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podName: + description: |- + PodName is the name of the driver pod that the user creates. This is used for the + in-cluster client mode in which the user creates a client pod where the driver of + the user application runs. It's an error to set this field if Mode is not + in-cluster-client. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. 
+ type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - terminationMessagePolicy: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - tty: + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. 
type: boolean - volumeDevices: + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the + pods objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will + be used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add + to the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object + description: Capability represent POSIX capabilities + type + type: string type: array - volumeMounts: + drop: + description: Removed capabilities items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object + description: Capability represent POSIX capabilities + type + type: string type: array - workingDir: - type: string - required: - - name type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. 
+ type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - effect: + level: + description: Level is SELinux level label that applies + to the container. type: string - key: + role: + description: Role is a SELinux role label that applies + to the container. type: string - operator: + type: + description: Type is a SELinux type label that applies + to the container. type: string - tolerationSeconds: - format: int64 - type: integer - value: + user: + description: User is a SELinux user label that applies + to the container. type: string type: object - type: array - volumeMounts: - items: + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: - mountPath: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - mountPropagation: - type: string - name: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
type: string
- readOnly:
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of
+ the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
type: boolean
- subPath:
- type: string
- subPathExpr:
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
type: string
- required:
- - mountPath
- - name
type: object
- type: array
- type: object
- dynamicAllocation:
- properties:
- enabled:
- type: boolean
- initialExecutors:
- format: int32
- type: integer
- maxExecutors:
- format: int32
- type: integer
- minExecutors:
- format: int32
- type: integer
- shuffleTrackingTimeout:
- format: int64
- type: integer
- type: object
- executor:
- properties:
- affinity:
+ type: object
+ serviceAccount:
+ description: ServiceAccount is the name of the custom Kubernetes
+ service account used by the pod.
+ type: string
+ serviceAnnotations:
+ additionalProperties:
+ type: string
+ description: |-
+ ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by
+ executors to connect to the driver.
+ type: object
+ serviceLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ ServiceLabels defines the labels to be added to the Kubernetes headless service used by
+ executors to connect to the driver.
+ type: object
+ shareProcessNamespace:
+ description: ShareProcessNamespace settings for the pod, following
+ the Kubernetes specifications.
+ type: boolean
+ sidecars:
+ description: Sidecars is a list of sidecar containers that
+ run alongside the main Spark container.
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. type: string required: - - topologyKey + - fieldPath type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' type: string required: - - topologyKey + - resource type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey + x-kubernetes-map-type: atomic type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - deleteOnTermination: - type: boolean - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string + required: + - name type: object type: array - searches: + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: + description: EnvFromSource represents the source of + a set of ConfigMaps properties: - configMapKeyRef: + configMapRef: + description: The ConfigMap to select from properties: - key: - type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: + description: Specify whether the ConfigMap + must be defined type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource type: object - secretKeyRef: + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - key: - type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: + description: Specify whether the Secret must + be defined type: boolean - required: - - key type: object + x-kubernetes-map-type: atomic type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - configMapRef: + exec: + description: Exec specifies the action to take. properties: - name: - type: string - optional: - type: boolean + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array type: object - prefix: - type: string - secretRef: + httpGet: + description: HTTPGet specifies the http request + to perform. properties: - name: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - required: - - port - type: object - tcpSocket: - properties: - host: + value: + description: The header field value type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port + - name + - value type: object - type: object - type: object - livenessProbe: + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
format: int32 type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort type: object - name: - type: string - ports: - items: + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. format: int32 type: integer - name: - type: string - protocol: + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - - containerPort - - protocol + - port type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - - port + - name type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - devicePath: + level: + description: Level is SELinux level label that + applies to the container. type: string - name: + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. 
+ type: string + user: + description: User is a SELinux user label that + applies to the container. type: string - required: - - devicePath - - name type: object - type: array - volumeMounts: - items: + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: - mountPath: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - mountPropagation: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - name: + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string - readOnly: - type: boolean - subPath: + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. type: string - subPathExpr: + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
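# [Editor's note] Illustrative only: the securityContext schema above mirrors the upstream
# Kubernetes SecurityContext type. A hardened container might combine these fields as
# follows (all values are examples, not defaults):
#
#   securityContext:
#     runAsNonRoot: true
#     runAsUser: 1000                    # hypothetical non-root UID
#     allowPrivilegeEscalation: false
#     readOnlyRootFilesystem: true
#     capabilities:
#       drop: ["ALL"]
#     seccompProfile:
#       type: RuntimeDefault             # localhostProfile is only set for type Localhost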
type: string - required: - - mountPath - - name type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - instances: - format: int32 - minimum: 1 - type: integer - javaOptions: - type: string - labels: - additionalProperties: - type: string - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
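# [Editor's note] An illustrative startupProbe for a slow-starting container, using the
# httpGet and threshold fields defined above (endpoint and numbers are hypothetical):
#
#   startupProbe:
#     httpGet:
#       path: /healthz
#       port: 8080                       # int-or-string: a named port also validates
#     periodSeconds: 10
#     failureThreshold: 30               # tolerates up to 30 * 10s of startup time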
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - privileged: + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean - procMount: + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. type: string - readOnlyRootFilesystem: + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. type: boolean - runAsGroup: + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ terminationGracePeriodSeconds:
+ description: Termination grace period seconds for the pod
+ format: int64
+ type: integer
+ tolerations:
+ description: Tolerations specifies the tolerations listed
+ in ".spec.tolerations" to be applied to the pod.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
 format: int64
 type: integer
- runAsNonRoot:
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ description: VolumeMounts specifies the volumes listed in
+ ".spec.volumes" to mount into the main container's filesystem.
+ items:
+ description: VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
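# [Editor's note] Illustrative use of the tolerations field above on a v1beta2 pod spec
# (taint keys and values are hypothetical):
#
#   spec:
#     executor:
#       tolerations:
#       - key: dedicated
#         operator: Equal                # Equal matches key and value; Exists matches key only
#         value: spark
#         effect: NoSchedule
#       - key: spot
#         operator: Exists
#         effect: NoExecute
#         tolerationSeconds: 300         # evict 5 minutes after the taint appears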
type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name type: object - serviceAccount: - type: string - shareProcessNamespace: - type: boolean - sidecars: + type: array + type: object + driverIngressOptions: + description: DriverIngressOptions allows configuring the Service + and the Ingress to expose ports inside Spark Driver + items: + description: DriverIngressConfiguration is for driver ingress + specific configuration parameters. + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key,value pairs + of annotations that might be added to the ingress object. + i.e. specify nginx as ingress.class + type: object + ingressTLS: + description: TlsHosts is useful If we need to declare SSL + certificates to the ingress object items: + description: IngressTLS describes the transport layer + security associated with an ingress. properties: - args: - items: - type: string - type: array - command: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. items: type: string type: array - env: + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + ingressURLFormat: + description: IngressURLFormat is the URL for the ingress. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs + of annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of + labels that might be added to the service object. + type: object + servicePort: + description: ServicePort allows configuring the port at + service level that might be different from the targetPort. + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP. 
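# [Editor's note] An illustrative driverIngressOptions entry exposing a driver port through
# a Service and Ingress; servicePort and servicePortName are both required by this schema
# (host and annotation values are hypothetical):
#
#   spec:
#     driverIngressOptions:
#     - servicePort: 4040
#       servicePortName: spark-ui
#       serviceType: ClusterIP
#       ingressURLFormat: spark-ui.example.com   # hypothetical ingress URL
#       ingressAnnotations:
#         kubernetes.io/ingress.class: nginx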
+ type: string + serviceType: + description: ServiceType allows configuring the type of + the service. Defaults to ClusterIP. + type: string + required: + - servicePort + - servicePortName + type: object + type: array + dynamicAllocation: + description: |- + DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes + scheduler backend since Spark 3.0. + properties: + enabled: + description: Enabled controls whether dynamic allocation is + enabled or not. + type: boolean + initialExecutors: + description: |- + InitialExecutors is the initial number of executors to request. If .spec.executor.instances + is also set, the initial number of executors is set to the bigger of that and this option. + format: int32 + type: integer + maxExecutors: + description: MaxExecutors is the upper bound for the number + of executors if dynamic allocation is enabled. + format: int32 + type: integer + minExecutors: + description: MinExecutors is the lower bound for the number + of executors if dynamic allocation is enabled. + format: int32 + type: integer + shuffleTrackingTimeout: + description: |- + ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding + shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled). + format: int64 + type: integer + type: object + executor: + description: Executor is the executor specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity + settings for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: - name: - type: string - value: - type: string - valueFrom: + preference: + description: A node selector term, associated + with the corresponding weight. 
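# [Editor's note] Illustrative dynamicAllocation settings matching the schema above
# (numbers are arbitrary):
#
#   spec:
#     dynamicAllocation:
#       enabled: true
#       initialExecutors: 2              # the larger of this and .spec.executor.instances wins
#       minExecutors: 1
#       maxExecutors: 10
#       shuffleTrackingTimeout: 60000    # milliseconds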
properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object + - preference + - weight type: object type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - value: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
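# [Editor's note] An illustrative executor nodeAffinity using the required (hard) node
# selector terms defined above; label key and values are hypothetical:
#
#   spec:
#     executor:
#       affinity:
#         nodeAffinity:
#           requiredDuringSchedulingIgnoredDuringExecution:
#             nodeSelectorTerms:         # terms are ORed
#             - matchExpressions:        # requirements within a term are ANDed
#               - key: node.kubernetes.io/instance-type
#                 operator: In
#                 values: ["r5.2xlarge", "r5.4xlarge"]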
type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - required: - - port - type: object - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
 type: string
- port:
- anyOf:
- - type: integer
- - type: string
- x-kubernetes-int-or-string: true
- required:
- - port
- type: object
- type: object
- type: object
- livenessProbe:
- properties:
- exec:
- properties:
- command:
- items:
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
 type: string
- type: array
- type: object
- failureThreshold:
- format: int32
- type: integer
- httpGet:
- properties:
- host:
- type: string
- httpHeaders:
- items:
- properties:
- name:
- type: string
- value:
- type: string
- required:
- - name
- - value
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
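# [Editor's note] An illustrative soft podAffinity that prefers co-locating executors in the
# same zone as their driver (assumes the operator's spark-role pod label; weight is arbitrary):
#
#   affinity:
#     podAffinity:
#       preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 100                    # 1-100, summed per candidate node
#         podAffinityTerm:
#           labelSelector:
#             matchLabels:
#               spark-role: driver
#           topologyKey: topology.kubernetes.io/zone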
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. 
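# [Editor's note] An illustrative soft podAntiAffinity spreading executors across nodes;
# being a preference rather than a requirement, it still schedules on small clusters:
#
#   affinity:
#     podAntiAffinity:
#       preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 50
#         podAffinityTerm:
#           labelSelector:
#             matchLabels:
#               spark-role: executor     # assumes the operator's spark-role label
#           topologyKey: kubernetes.io/hostname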
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
 format: int32
 type: integer
- name:
- type: string
- protocol:
- type: string
 required:
- - containerPort
- - protocol
 type: object
 type: array
- x-kubernetes-list-map-keys:
- - containerPort
- - protocol
- x-kubernetes-list-type: map
- readinessProbe:
- properties:
- exec:
- properties:
- command:
- items:
- type: string
- type: array
- type: object
- failureThreshold:
- format: int32
- type: integer
- httpGet:
- properties:
- host:
- type: string
- httpHeaders:
- items:
- properties:
- name:
- type: string
- value:
- type: string
- required:
- - name
- - value
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements.
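# [Editor's note] The required (hard) variant below uses the same PodAffinityTerm shape but
# without a weight; if no node satisfies every term, the pod stays Pending. Illustrative only:
#
#   podAntiAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#     - labelSelector:
#         matchExpressions:
#         - key: spark-role
#           operator: In
#           values: ["executor"]
#       topologyKey: kubernetes.io/hostname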
The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
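Note: matchLabelKeys and mismatchLabelKeys are alpha fields behind the MatchLabelKeysInPodAffinity feature gate, so most clusters should leave them unset. A hedged sketch, assuming the gate is enabled (labels hypothetical):

    podAffinityTerm:
      topologyKey: kubernetes.io/hostname
      labelSelector:
        matchLabels:
          app: spark-worker        # hypothetical label
      matchLabelKeys:
      - pod-template-hash          # must not also appear in labelSelector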
+ items: type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
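Note: this is the required (hard) anti-affinity variant; the weighted form earlier in the schema is the preferred (soft) one. For illustration only, executor pods could be forced onto distinct nodes with something like (label key and value hypothetical):

    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: kubernetes.io/hostname   # must be non-empty
          labelSelector:
            matchLabels:
              spark-app-name: my-app            # hypothetical
          namespaceSelector: {}                 # empty selector matches all namespaces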
type: string required: - - mountPath - - name + - topologyKey type: object type: array - workingDir: - type: string - required: - - name - type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name type: object - type: array - type: object - failureRetries: - format: int32 - type: integer - hadoopConf: - additionalProperties: - type: string - type: object - hadoopConfigMap: - type: string - image: - type: string - imagePullPolicy: - type: string - imagePullSecrets: - items: - type: string - type: array - mainApplicationFile: - type: string - mainClass: - type: string - memoryOverheadFactor: - type: string - mode: - enum: - - cluster - - client - type: string - monitoring: - properties: - exposeDriverMetrics: - type: boolean - exposeExecutorMetrics: - type: boolean - metricsProperties: - type: string - metricsPropertiesFile: + type: object + annotations: + additionalProperties: type: string - prometheus: + description: Annotations are the Kubernetes annotations to + be added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to + which the named objects should be mounted to. properties: - configFile: - type: string - configuration: - type: string - jmxExporterJar: + name: type: string - port: - format: int32 - maximum: 49151 - minimum: 1024 - type: integer - portName: + path: type: string required: - - jmxExporterJar + - name + - path type: object - required: - - exposeDriverMetrics - - exposeExecutorMetrics - type: object - nodeSelector: - additionalProperties: + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional type: string - type: object - proxyUser: - type: string - pythonVersion: - enum: - - "2" - - "3" - type: string - restartPolicy: - properties: - onFailureRetries: - format: int32 - minimum: 0 - type: integer - onFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - onSubmissionFailureRetries: - format: int32 - minimum: 0 - type: integer - onSubmissionFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - type: - enum: - - Never - - Always - - OnFailure - type: string - type: object - retryInterval: - format: int64 - type: integer - sparkConf: - additionalProperties: + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the executors. + Maps to `spark.kubernetes.executor.request.cores` that is available since Spark 2.4. 
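Note: coreRequest takes a Kubernetes quantity string while cores is an integer (minimum 1). An illustrative executor snippet (values are examples only):

    executor:
      instances: 2
      cores: 1              # maps to spark.executor.cores
      coreRequest: "500m"   # maps to spark.kubernetes.executor.request.cores (Spark 2.4+)
      coreLimit: "1200m"    # hard CPU limit on the executor pods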
type: string - type: object - sparkConfigMap: - type: string - sparkUIOptions: - properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressAnnotations: - additionalProperties: - type: string - type: object - ingressTLS: - items: - properties: - hosts: - items: - type: string - type: array - secretName: - type: string - type: object - type: array - servicePort: - format: int32 - type: integer - serviceType: - type: string - type: object - driverIngressOptions: - items: + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + deleteOnTermination: + description: |- + DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination. + Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0. + type: boolean + dnsConfig: + description: DnsConfig dns settings for the pod, following + the Kubernetes specifications. properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressURLFormat: - type: string - ingressAnnotations: - additionalProperties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: type: string - type: object - ingressTLS: + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. properties: - hosts: - items: - type: string - type: array - secretName: + name: + description: Required. + type: string + value: type: string type: object type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. 
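Note: dnsConfig mirrors the Kubernetes PodDNSConfig type. A minimal sketch (addresses and domains hypothetical):

    executor:
      dnsConfig:
        nameservers:
        - 1.2.3.4
        searches:
        - ns1.svc.cluster.local
        options:
        - name: ndots
          value: "2"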
+ items:
+ type: string
+ type: array
type: object
- type: array
- sparkVersion:
- type: string
- timeToLiveSeconds:
- format: int64
- type: integer
- type:
- enum:
- - Java
- - Python
- - Scala
- - R
- type: string
- volumes:
- items:
- properties:
- awsElasticBlockStore:
- properties:
- fsType:
- type: string
- partition:
- format: int32
- type: integer
- readOnly:
- type: boolean
- volumeID:
- type: string
- required:
- - volumeID
- type: object
- azureDisk:
- properties:
- cachingMode:
- type: string
- diskName:
- type: string
- diskURI:
- type: string
- fsType:
- type: string
- kind:
- type: string
- readOnly:
- type: boolean
- required:
- - diskName
- - diskURI
- type: object
- azureFile:
- properties:
- readOnly:
- type: boolean
- secretName:
- type: string
- shareName:
- type: string
- required:
- - secretName
- - shareName
- type: object
- cephfs:
- properties:
- monitors:
- items:
- type: string
- type: array
- path:
- type: string
- readOnly:
- type: boolean
- secretFile:
- type: string
- secretRef:
- properties:
- name:
- type: string
- type: object
- user:
- type: string
- required:
- - monitors
- type: object
- cinder:
- properties:
- fsType:
- type: string
- readOnly:
- type: boolean
- secretRef:
- properties:
- name:
- type: string
- type: object
- volumeID:
- type: string
- required:
- - volumeID
- type: object
- configMap:
- properties:
- defaultMode:
- format: int32
- type: integer
- items:
- items:
+ env:
+ description: Env carries the environment variables to add
+ to the pod.
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
properties:
key:
+ description: The key to select.
type: string
- mode:
- format: int32
- type: integer
- path:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
+ its key must be defined
+ type: boolean
required:
- key
- - path
type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. 
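Note: envSecretKeyRefs and envVars above are both marked deprecated in favor of env. An illustrative driver snippet covering env and envFrom (resource names hypothetical):

    driver:
      env:
      - name: LOG_LEVEL
        value: INFO
      - name: DB_PASSWORD
        valueFrom:
          secretKeyRef:
            name: db-credentials     # hypothetical Secret
            key: password
      envFrom:
      - configMapRef:
          name: spark-common-env     # hypothetical ConfigMap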
+ properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request + for driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the + Kubernetes specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: type: string - optional: - type: boolean - type: object - csi: - properties: - driver: + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host + networking for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides + Spec.Image if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - fsType: + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - nodePublishSecretRef: + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. properties: name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name type: object - readOnly: - type: boolean - volumeAttributes: - additionalProperties: - type: string - type: object - required: - - driver - type: object - downwardAPI: - properties: - defaultMode: - format: int32 - type: integer + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
items: - items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - fieldRef: + exec: + description: Exec specifies the action to take. properties: - apiVersion: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. type: string - fieldPath: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - fieldPath + - port type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: - containerName: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string - divisor: + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - resource: - type: string required: - - resource + - port type: object - required: - - path type: object - type: array - type: object - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - spec: - properties: - accessModes: - items: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
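Note: as the exec description says, hook commands are not run in a shell, so shell syntax needs an explicit sh -c. A sketch of both hooks (endpoint and path hypothetical):

    lifecycle:
      postStart:
        exec:
          command: ["sh", "-c", "echo started > /tmp/hook"]
      preStop:
        httpGet:
          path: /quiesce     # hypothetical endpoint
          port: 8080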
+ items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string - type: array - resources: - properties: - requests: + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - storage: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: object - type: object - storageClassName: - type: string - type: object - type: object - type: object - fc: - properties: - fsType: - type: string - lun: - format: int32 - type: integer - readOnly: - type: boolean - targetWWNs: - items: - type: string - type: array - wwids: - items: - type: string - type: array - type: object - flexVolume: - properties: - driver: - type: string - fsType: - type: string - options: - additionalProperties: - type: string - type: object - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - required: - - driver - type: object - flocker: - properties: - datasetName: - type: string - datasetUUID: - type: string - type: object - gcePersistentDisk: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - pdName: - type: string - readOnly: - type: boolean - required: - - pdName - type: object - gitRepo: - properties: - directory: - type: string - repository: - type: string - revision: - type: string - required: - - repository - type: object - glusterfs: - properties: - endpoints: - type: string - path: - type: string - readOnly: - type: boolean - required: - - endpoints - - path - type: object - hostPath: - properties: - path: - type: string - type: - type: string - required: - - path - type: object - iscsi: - properties: - chapAuthDiscovery: - type: boolean - chapAuthSession: - type: boolean - fsType: - type: string - initiatorName: - type: string - iqn: - type: string - iscsiInterface: - type: string - lun: - format: int32 - type: integer - portals: - items: - type: string - type: array - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - targetPortal: - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - type: string - nfs: - properties: - path: - type: string - readOnly: - type: boolean - server: - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - properties: - claimName: - type: string - readOnly: - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - properties: - fsType: - type: string - pdID: - type: string - required: - - pdID - type: object - portworxVolume: - properties: - fsType: - type: string - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - projected: - properties: - defaultMode: - format: int32 - type: integer - sources: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path + value: + description: The header field value + type: string + required: + - name + - value type: object type: array - name: + path: + description: Path to access on the 
HTTP + server. type: string - optional: - type: boolean - type: object - downwardAPI: - properties: - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - type: boolean + required: + - port type: object - serviceAccountToken: + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. properties: - audience: - type: string - expirationSeconds: + seconds: + description: Seconds is the number of seconds + to sleep. format: int64 type: integer - path: + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - path + - port type: object type: object - type: array - required: - - sources - type: object - quobyte: - properties: - group: - type: string - readOnly: - type: boolean - registry: - type: string - tenant: - type: string - user: - type: string - volume: - type: string - required: - - registry - - volume - type: object - rbd: - properties: - fsType: - type: string - image: - type: string - keyring: - type: string - monitors: - items: - type: string - type: array - pool: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - image - - monitors - type: object - scaleIO: - properties: - fsType: - type: string - gateway: - type: string - protectionDomain: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - sslEnabled: - type: boolean - storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - properties: - defaultMode: - format: int32 - type: integer - items: - items: + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. 
+ Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: - key: - type: string - mode: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. format: int32 type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - key - - path + - port type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeName: - type: string - volumeNamespace: - type: string - type: object - vsphereVolume: - properties: - fsType: - type: string - storagePolicyID: - type: string - storagePolicyName: - type: string - volumePath: - type: string + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
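Note: a liveness probe sketch relying on the defaults called out above (probe command illustrative):

    livenessProbe:
      exec:
        command: ["cat", "/tmp/healthy"]
      initialDelaySeconds: 10
      periodSeconds: 10      # the default
      failureThreshold: 3    # the default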
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". 
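Note: port names must be IANA_SVC_NAMEs (lower case, 15 characters or fewer). Since the Spark UI conventionally listens on 4040, an illustrative entry is:

    ports:
    - name: spark-ui
      containerPort: 4040
      protocol: TCP    # the default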
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
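Note: unlike liveness and startup probes, a readiness probe may set successThreshold above 1. A sketch (path hypothetical):

    readinessProbe:
      httpGet:
        path: /readyz    # hypothetical endpoint
        port: 8080
        scheme: HTTP     # the default
      successThreshold: 2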
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. 
If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. 
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + instances: + description: Instances is the number of executor instances. + format: int32 + minimum: 1 + type: integer + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the executors. For instance, + GC settings or other logging. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added + to the pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. 
There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for + the pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory + to allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of
+ the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ ports:
+ description: Port settings for the pods, following the Kubernetes
+ specifications.
+ items:
+ description: Port represents the port definition in the
+ pod objects.
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ type: string
+ required:
+ - containerPort
+ - name
+ - protocol
+ type: object
+ type: array
+ schedulerName:
+ description: SchedulerName specifies the scheduler that will
+ be used for scheduling.
+ type: string
+ secrets:
+ description: Secrets carries information about secrets to add
+ to the pod.
+ items:
+ description: SecretInfo captures information about a secret.
+ properties:
+ name:
+ type: string
+ path:
+ type: string
+ secretType:
+ description: SecretType tells the type of a secret.
+ type: string
+ required:
+ - name
+ - path
+ - secretType
+ type: object
+ type: array
+ securityContext:
+ description: SecurityContext specifies the container's SecurityContext
+ to apply.
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default is DefaultProcMount which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string required: - - volumePath + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of
+ the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
type: object
+ type: object
+ serviceAccount:
+ description: ServiceAccount is the name of the custom Kubernetes
+ service account used by the pod.
+ type: string
+ shareProcessNamespace:
+ description: ShareProcessNamespace settings for the pod, following
+ the Kubernetes specifications.
+ type: boolean
+ sidecars:
+ description: Sidecars is a list of sidecar containers that
+ run alongside the main Spark container.
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. 
All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. 
Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. 
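+ # Illustrative example (hypothetical container and values, not part of the
+ # generated schema): a readiness probe on a driver sidecar container could
+ # combine the fields described above as follows:
+ #   readinessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     initialDelaySeconds: 5
+ #     periodSeconds: 10
+ #     failureThreshold: 3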
+ type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. 
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. 
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
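+ # Illustrative example (hypothetical values): a slow-starting container could
+ # pair the startup probe fields above with a generous failureThreshold, e.g.:
+ #   startupProbe:
+ #     tcpSocket:
+ #       port: 4040
+ #     periodSeconds: 10
+ #     failureThreshold: 30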
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ terminationGracePeriodSeconds:
+ description: Termination grace period seconds for the pod
+ format: int64
+ type: integer
+ tolerations:
+ description: Tolerations specifies the tolerations listed
+ in ".spec.tolerations" to be applied to the pod.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint.
By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ description: VolumeMounts specifies the volumes listed in
+ ".spec.volumes" to mount into the main container's filesystem.
+ items:
+ description: VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ type: object
+ failureRetries:
+ description: |-
+ FailureRetries is the number of times to retry a failed application before giving up.
+ This is best effort and actual retry attempts can be >= the value specified.
+ format: int32
+ type: integer
+ hadoopConf:
+ additionalProperties:
+ type: string
+ description: |-
+ HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option
+ in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop
+ configuration properties.
+ type: object
+ hadoopConfigMap:
+ description: |-
+ HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml.
+ The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to.
+ type: string
+ image:
+ description: |-
+ Image is the container image for the driver, executor, and init-container. Any custom container images for the
+ driver, executor, or init-container take precedence over this.
+ type: string
+ imagePullPolicy:
+ description: ImagePullPolicy is the image pull policy for the
+ driver, executor, and init-container.
+ type: string
+ imagePullSecrets:
+ description: ImagePullSecrets is the list of image-pull secrets.
+ items:
+ type: string
+ type: array
+ mainApplicationFile:
+ description: MainFile is the path to a bundled JAR, Python, or
+ R file of the application.
+ type: string
+ mainClass:
+ description: |-
+ MainClass is the fully-qualified main class of the Spark application.
+ This only applies to Java/Scala Spark applications.
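+ # Illustrative example (image, class and file path are hypothetical): the
+ # application-level fields above are typically combined like this:
+ #   spec:
+ #     type: Scala
+ #     image: my-registry/spark:v3.5.0
+ #     imagePullPolicy: IfNotPresent
+ #     mainClass: org.example.SparkPi
+ #     mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
+ #     hadoopConf:
+ #       fs.s3a.endpoint: s3.example.com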
+ type: string + memoryOverheadFactor: + description: |- + This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. + For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will + be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set. + type: string + mode: + description: Mode is the deployment mode of the Spark application. + enum: + - cluster + - client + type: string + monitoring: + description: Monitoring configures how monitoring is handled. + properties: + exposeDriverMetrics: + description: ExposeDriverMetrics specifies whether to expose + metrics on the driver. + type: boolean + exposeExecutorMetrics: + description: ExposeExecutorMetrics specifies whether to expose + metrics on the executors. + type: boolean + metricsProperties: + description: |- + MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system. + If not specified, the content in spark-docker/conf/metrics.properties will be used. + type: string + metricsPropertiesFile: + description: |- + MetricsPropertiesFile is the container local path of file metrics.properties for configuring + the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used. + type: string + prometheus: + description: Prometheus is for configuring the Prometheus + JMX exporter. + properties: + configFile: + description: |- + ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. + ConfigFile takes precedence over Configuration, which is shown below. + type: string + configuration: + description: |- + Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. + If not specified, the content in spark-docker/conf/prometheus.yaml will be used. + Configuration has no effect if ConfigFile is set. + type: string + jmxExporterJar: + description: JmxExporterJar is the path to the Prometheus + JMX exporter jar in the container. + type: string + port: + description: |- + Port is the port of the HTTP server run by the Prometheus JMX exporter. + If not specified, 8090 will be used as the default. + format: int32 + maximum: 49151 + minimum: 1024 + type: integer + portName: + description: |- + PortName is the port name of prometheus JMX exporter port. + If not specified, jmx-exporter will be used as the default. + type: string required: - - name + - jmxExporterJar type: object - type: array - required: - - driver - - executor - - sparkVersion - - type - type: object - required: - - schedule - - template - type: object - status: - properties: - lastRun: - format: date-time - nullable: true - type: string - lastRunName: - type: string - nextRun: - format: date-time - nullable: true - type: string - pastFailedRunNames: - items: + required: + - exposeDriverMetrics + - exposeExecutorMetrics + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). + This field will be deprecated in future versions (at SparkApplicationSpec level). + type: object + proxyUser: + description: |- + ProxyUser specifies the user to impersonate when submitting the application. + It maps to the command-line flag "--proxy-user" in spark-submit. 
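+ # Illustrative example (the exporter jar path is hypothetical): a monitoring
+ # block wiring up the Prometheus JMX exporter might look like:
+ #   monitoring:
+ #     exposeDriverMetrics: true
+ #     exposeExecutorMetrics: true
+ #     prometheus:
+ #       jmxExporterJar: /opt/spark/jars/jmx_prometheus_javaagent.jar
+ #       port: 8090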
+ type: string
+ pythonVersion:
+ description: |-
+ This sets the major Python version of the docker
+ image used to run the driver and executor containers. Can either be 2 or 3, default 2.
+ enum:
+ - "2"
+ - "3"
+ type: string
+ restartPolicy:
+ description: RestartPolicy defines the policy on whether and under
+ which conditions the controller should restart an application.
+ properties:
+ onFailureRetries:
+ description: OnFailureRetries is the number of times to retry
+ running an application before giving up.
+ format: int32
+ minimum: 0
+ type: integer
+ onFailureRetryInterval:
+ description: OnFailureRetryInterval is the interval in seconds
+ between retries on failed runs.
+ format: int64
+ minimum: 1
+ type: integer
+ onSubmissionFailureRetries:
+ description: |-
+ OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up.
+ This is best effort and actual retry attempts can be >= the value specified due to caching.
+ These are required if RestartPolicy is OnFailure.
+ format: int32
+ minimum: 0
+ type: integer
+ onSubmissionFailureRetryInterval:
+ description: OnSubmissionFailureRetryInterval is the interval
+ in seconds between retries on failed submissions.
+ format: int64
+ minimum: 1
+ type: integer
+ type:
+ description: Type specifies the RestartPolicyType.
+ enum:
+ - Never
+ - Always
+ - OnFailure
+ type: string
+ type: object
+ retryInterval:
+ description: RetryInterval is the unit of intervals in seconds
+ between submission retries.
+ format: int64
+ type: integer
+ sparkConf:
+ additionalProperties:
+ type: string
+ description: |-
+ SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
+ spark-submit.
+ type: object
+ sparkConfigMap:
+ description: |-
+ SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties.
+ The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to.
+ type: string
+ sparkUIOptions:
+ description: SparkUIOptions allows configuring the Service and
+ the Ingress to expose the sparkUI
+ properties:
+ ingressAnnotations:
+ additionalProperties:
+ type: string
+ description: IngressAnnotations is a map of key,value pairs
+ of annotations that might be added to the ingress object,
+ e.g. specify nginx as ingress.class
+ type: object
+ ingressTLS:
+ description: IngressTLS is useful if we need to declare SSL
+ certificates for the ingress object
+ items:
+ description: IngressTLS describes the transport layer security
+ associated with an ingress.
+ properties:
+ hosts:
+ description: |-
+ hosts is a list of hosts included in the TLS certificate. The values in
+ this list must match the name/s used in the tlsSecret. Defaults to the
+ wildcard host setting for the loadbalancer controller fulfilling this
+ Ingress, if left unspecified.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ secretName:
+ description: |-
+ secretName is the name of the secret used to terminate TLS traffic on
+ port 443. Field is left optional to allow TLS routing based on SNI
+ hostname alone. If the SNI host in a listener conflicts with the "Host"
+ header field used by an IngressRule, the SNI host is used for termination
+ and value of the "Host" header is used for routing.
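+ # Illustrative example (hypothetical values): an OnFailure restart policy with
+ # bounded retries, using the fields described above:
+ #   restartPolicy:
+ #     type: OnFailure
+ #     onFailureRetries: 3
+ #     onFailureRetryInterval: 10
+ #     onSubmissionFailureRetries: 5
+ #     onSubmissionFailureRetryInterval: 20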
+ type: string + type: object + type: array + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs + of annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of + labels that might be added to the service object. + type: object + servicePort: + description: |- + ServicePort allows configuring the port at service level that might be different from the targetPort. + TargetPort should be the same as the one defined in spark.ui.port + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP. + Defaults to spark-driver-ui-port. + type: string + serviceType: + description: ServiceType allows configuring the type of the + service. Defaults to ClusterIP. + type: string + type: object + sparkVersion: + description: SparkVersion is the version of Spark the application + uses. type: string - type: array - pastSuccessfulRunNames: - items: + timeToLiveSeconds: + description: |- + TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication + after its termination. + The SparkApplication object will be garbage collected if the current time is more than the + TimeToLiveSeconds since its termination. + format: int64 + type: integer + type: + description: Type tells the type of the Spark application. + enum: + - Java + - Python + - Scala + - R type: string - type: array - reason: + volumes: + description: Volumes is the list of Kubernetes volumes that can + be mounted by the driver and/or executors. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
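+ # Illustrative example (annotation values are hypothetical): exposing the
+ # Spark UI through a service and ingress with the options above:
+ #   sparkUIOptions:
+ #     serviceType: ClusterIP
+ #     servicePort: 4040
+ #     servicePortName: spark-driver-ui-port
+ #     ingressAnnotations:
+ #       kubernetes.io/ingress.class: nginx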
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure managed + data disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specifies whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about the
+ pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents information
+ to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the
+ pod: only annotations, labels, name and namespace
+ are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
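+ # Illustrative example (file name is hypothetical): a downwardAPI volume that
+ # projects pod labels into a file, using the fields above:
+ #   downwardAPI:
+ #     items:
+ #     - path: labels
+ #       fieldRef:
+ #         fieldPath: metadata.labels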
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). 
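+ # Illustrative example (volume name is hypothetical): a memory-backed scratch
+ # volume using the emptyDir fields above:
+ #   volumes:
+ #   - name: spark-local-dir-1
+ #     emptyDir:
+ #       medium: Memory
+ #       sizeLimit: 1Gi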
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired.
This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName; + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod.
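For orientation, here is a minimal sketch of how the generic ephemeral volume described above could appear in a SparkApplication. Every name in it (spark-pi, scratch, the standard StorageClass) is an illustrative assumption, not a value taken from this patch; per the naming rule above, the operator-created PVC would be called `<driver pod name>-scratch`.

apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi                       # hypothetical application name
spec:
  volumes:
    - name: scratch                    # hypothetical volume name
      ephemeral:
        volumeClaimTemplate:
          spec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: standard # assumes such a StorageClass exists
            resources:
              requests:
                storage: 1Gi
  driver:
    volumeMounts:
      - name: scratch
        mountPath: /tmp/scratch        # where the driver container sees the volume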
+ properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
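As a quick illustration of the hostPath source documented above, a SparkApplication could mount a host directory into the driver as sketched below. The volume name and path are hypothetical, and applying volume mounts to Spark pods generally requires the operator's mutating webhook to be enabled.

spec:
  volumes:
    - name: host-logs                  # hypothetical volume name
      hostPath:
        path: /var/log/spark           # assumed host directory
        type: DirectoryOrCreate
  driver:
    volumeMounts:
      - name: host-logs
        mountPath: /var/log/spark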
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether to support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether to support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. 
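The persistentVolumeClaim source above is the usual choice for durable checkpoint or event-log storage. A hedged sketch, assuming a PVC named spark-checkpoints already exists in the application's namespace:

spec:
  volumes:
    - name: checkpoint                 # hypothetical volume name
      persistentVolumeClaim:
        claimName: spark-checkpoints   # assumed pre-existing PVC
        readOnly: false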
Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes.
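Tying the projected sources above together (configMap, downwardAPI, secret, serviceAccountToken), one projected volume can merge several of them under a single mount. In the sketch below the ConfigMap name, key, audience, and paths are assumptions for illustration only:

spec:
  volumes:
    - name: projected-config           # hypothetical volume name
      projected:
        defaultMode: 0444              # octal form; JSON clients must write 292
        sources:
          - configMap:
              name: spark-conf         # assumed ConfigMap
              items:
                - key: spark-defaults.conf
                  path: spark-defaults.conf
          - serviceAccountToken:
              audience: my-audience    # assumed audience string
              expirationSeconds: 3600  # schema requires at least 600 (10 minutes)
              path: token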
+ format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serviceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. 
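A short sketch of the secret volume type documented above, assuming a Secret named s3-credentials exists in the application's namespace (all other names are illustrative):

spec:
  volumes:
    - name: creds                      # hypothetical volume name
      secret:
        secretName: s3-credentials     # assumed pre-existing Secret
        defaultMode: 0400              # octal; read-only for the owner
        items:
          - key: access-key
            path: aws/access-key       # lands at <mountPath>/aws/access-key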
+ type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - driver + - executor + - sparkVersion + - type + type: object + required: + - schedule + - template + type: object + status: + description: ScheduledSparkApplicationStatus defines the observed state + of ScheduledSparkApplication. + properties: + lastRun: + description: LastRun is the time when the last run of the application + started. + format: date-time + nullable: true + type: string + lastRunName: + description: LastRunName is the name of the SparkApplication for the + most recent run of the application. + type: string + nextRun: + description: NextRun is the time when the next run of the application + will start. + format: date-time + nullable: true + type: string + pastFailedRunNames: + description: PastFailedRunNames keeps the names of SparkApplications + for past failed runs. + items: type: string - scheduleState: + type: array + pastSuccessfulRunNames: + description: PastSuccessfulRunNames keeps the names of SparkApplications + for past successful runs. + items: type: string - type: object - required: - - metadata - - spec - type: object - -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] + type: array + reason: + description: Reason tells why the ScheduledSparkApplication is in + the particular ScheduleState. + type: string + scheduleState: + description: ScheduleState is the current scheduling state of the + application. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index c67bb2afa..afc07c253 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -1,11 +1,10 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: (unknown) api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 + controller-gen.kubebuilder.io/version: v0.15.0 name: sparkapplications.sparkoperator.k8s.io spec: group: sparkoperator.k8s.io @@ -18,1090 +17,1825 @@ spec: singular: sparkapplication scope: Namespaced versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: { } - additionalPrinterColumns: - - jsonPath: .status.applicationState.state - name: Status - type: string - - jsonPath: .status.executionAttempts - name: Attempts - type: string - - jsonPath: .status.lastSubmissionAttemptTime - name: Start - type: string - - jsonPath: .status.terminationTime - name: Finish - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - arguments: - items: - type: string - type: array - batchScheduler: + - additionalPrinterColumns: + - jsonPath: .status.applicationState.state + name: Status + type: string + - jsonPath: .status.executionAttempts + name: Attempts + type: string + - jsonPath: .status.lastSubmissionAttemptTime + name: Start + type: string + - jsonPath: .status.terminationTime + name: Finish + type: string 
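The ScheduledSparkApplication status block that closes just above is written by the controller, not by users, so a filled-in example helps when reading it. All values below are invented for illustration; generated run names follow no format guaranteed by the schema.

status:
  lastRun: "2024-08-01T22:00:00Z"      # illustrative timestamps
  lastRunName: spark-pi-27193440       # hypothetical generated run name
  nextRun: "2024-08-01T23:00:00Z"
  pastSuccessfulRunNames:
    - spark-pi-27193380
  pastFailedRunNames: []
  scheduleState: Scheduled
  reason: ""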
+ - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SparkApplication is the Schema for the sparkapplications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + SparkApplicationSpec defines the desired state of SparkApplication + It carries every piece of information a spark-submit command takes and recognizes. + properties: + arguments: + description: Arguments is a list of arguments to be passed to the + application. + items: type: string - type: array - batchScheduler: + type: array + batchScheduler: + description: BatchScheduler configures which batch scheduler will + be used for scheduling + type: string + batchSchedulerOptions: + description: BatchSchedulerOptions provides fine-grained control over + how batch scheduling is performed. + properties: + priorityClassName: + description: PriorityClassName stands for the name of k8s PriorityClass + resource; it is used by the Volcano batch scheduler. + type: string + queue: + description: Queue stands for the resource queue which the application + belongs to; it is used by the Volcano batch scheduler. + type: string + resources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Resources stands for the custom resource list to request. Usually it is used to define the lower-bound limit. + If specified, volcano scheduler will consider it as the resources requested. + type: object + type: object + deps: + description: Deps captures all possible types of dependencies of a + Spark application. + properties: + excludePackages: + description: |- + ExcludePackages is a list of "groupId:artifactId", to exclude while resolving the + dependencies provided in Packages to avoid dependency conflicts. + items: type: string - queue: + type: array + files: + description: Files is a list of files the Spark application depends + on.
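Putting the spec fields defined so far together (arguments, batchScheduler, batchSchedulerOptions, deps), here is a hedged sketch of a SparkApplication spec. Versions, names, and coordinates are illustrative; the required driver and executor sections are omitted for brevity, and batchScheduler: volcano assumes Volcano is actually installed in the cluster.

spec:
  type: Scala
  mode: cluster
  sparkVersion: "3.5.0"                          # illustrative version
  mainClass: org.apache.spark.examples.SparkPi
  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar  # assumed path
  arguments:
    - "1000"
  batchScheduler: volcano                        # assumes the Volcano scheduler is present
  batchSchedulerOptions:
    queue: default
    priorityClassName: high-priority             # assumes this PriorityClass exists
  deps:
    packages:
      - org.apache.spark:spark-avro_2.12:3.5.0   # resolved from Maven Central
    excludePackages:
      - org.slf4j:slf4j-api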
+ items: type: string - resources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - deps: - properties: - excludePackages: - items: - type: string - type: array - files: - items: - type: string - type: array - jars: - items: - type: string - type: array - packages: - items: - type: string - type: array - pyFiles: - items: - type: string - type: array - repositories: - items: - type: string - type: array - type: object - driver: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: + type: array + jars: + description: Jars is a list of JAR files the Spark application + depends on. + items: + type: string + type: array + packages: + description: |- + Packages is a list of maven coordinates of jars to include on the driver and executor + classpaths. This will search the local maven repo, then maven central and any additional + remote repositories given by the "repositories" option. + Each package should be of the form "groupId:artifactId:version". + items: + type: string + type: array + pyFiles: + description: PyFiles is a list of Python files the Spark application + depends on. + items: + type: string + type: array + repositories: + description: |- + Repositories is a list of additional remote repositories to search for the maven coordinate + given with the "packages" option. + items: + type: string + type: array + type: object + driver: + description: Driver is the driver specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity settings + for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - operator: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: type: string - operator: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - type: object - type: array + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer required: - - nodeSelectorTerms + - podAffinityTerm + - weight type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: - matchExpressions: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - type: object + type: array + required: + - key + - operator type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object type: array - matchLabels: - additionalProperties: - type: string - type: object + required: + - key + - operator type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - searches: - items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: 
- properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: - type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object type: array - ip: - type: string type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
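# Illustrative example (hypothetical fragment, not part of the generated CRD):
# a weighted podAntiAffinity term matching the schema above, asking the
# scheduler to spread executors across nodes. "spark-role: executor" is
# assumed to match the labels on executor pods; adjust to your environment.
#
# spec:
#   executor:
#     affinity:
#       podAntiAffinity:
#         preferredDuringSchedulingIgnoredDuringExecution:
#         - weight: 100
#           podAffinityTerm:
#             labelSelector:
#               matchLabels:
#                 spark-role: executor
#             topologyKey: kubernetes.io/hostname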
items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - name: - type: string - value: - type: string - valueFrom: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object - secretKeyRef: + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer required: - - name + - podAffinityTerm + - weight type: object type: array - envFrom: + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: - configMapRef: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: - name: - type: string - optional: - type: boolean + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object - prefix: - type: string - secretRef: + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: - name: - type: string - optional: - type: boolean + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
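# Illustrative example (hypothetical fragment, not part of the generated CRD):
# the hard (required) anti-affinity variant. Unlike the weighted form above,
# executors that cannot satisfy this term remain unscheduled; topologyKey
# here spreads them across zones rather than nodes.
#
# spec:
#   executor:
#     affinity:
#       podAntiAffinity:
#         requiredDuringSchedulingIgnoredDuringExecution:
#         - labelSelector:
#             matchLabels:
#               spark-role: executor
#           topologyKey: topology.kubernetes.io/zone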
+ type: string + required: + - topologyKey type: object type: array - image: + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to be + added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to which + the named objects should be mounted to. + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the driver. + Maps to `spark.kubernetes.driver.request.cores` that is available since Spark 3.0. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + dnsConfig: + description: DnsConfig dns settings for the pod, following the + Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add to the + pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
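# Illustrative example (hypothetical fragment, not part of the generated CRD):
# the driver-level fields defined above. cores maps to spark.driver.cores,
# coreRequest to spark.kubernetes.driver.request.cores (Spark 3.0+), and
# coreLimit caps pod CPU; "app-config" and the DNS values are placeholders.
#
# spec:
#   driver:
#     cores: 1
#     coreRequest: "500m"
#     coreLimit: "1200m"
#     configMaps:
#     - name: app-config
#       path: /etc/spark/extra-conf
#     dnsConfig:
#       nameservers:
#       - 8.8.8.8
#       searches:
#       - default.svc.cluster.local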
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. 
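# Illustrative example (hypothetical fragment, not part of the generated CRD):
# the env/envFrom sources defined above; the Secret and ConfigMap names are
# placeholders. Prefer these over the deprecated envSecretKeyRefs/envVars
# fields that follow.
#
# spec:
#   driver:
#     env:
#     - name: LOG_LEVEL
#       value: INFO
#     - name: DB_PASSWORD
#       valueFrom:
#         secretKeyRef:
#           name: db-credentials
#           key: password
#     - name: POD_NAME
#       valueFrom:
#         fieldRef:
#           fieldPath: metadata.name
#     envFrom:
#     - configMapRef:
#         name: spark-env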
+ properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request for + driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the Kubernetes + specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: type: string - imagePullPolicy: + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host networking + for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides Spec.Image + if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - lifecycle: + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. 
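# Illustrative example (hypothetical fragment, not part of the generated CRD):
# gpu, hostAliases and an init container as defined above. The GPU resource
# name, IP/hostname, and the wait-for-service command are placeholders.
#
# spec:
#   executor:
#     gpu:
#       name: nvidia.com/gpu
#       quantity: 1
#     hostAliases:
#     - ip: "10.1.2.3"
#       hostnames:
#       - metadata.internal
#     initContainers:
#     - name: wait-for-db
#       image: busybox:1.36
#       command: ["sh", "-c", "until nc -z db-service 5432; do sleep 2; done"]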
properties: - postStart: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: + key: + description: The key to select. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean required: - - port + - key type: object - tcpSocket: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: - host: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array + - fieldPath type: object - httpGet: + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: + containerName: + description: 'Container name: required for + volumes, optional for env vars' type: string - port: + divisor: anyOf: - type: integer - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - scheme: + resource: + description: 'Required: resource to select' type: string required: - - port + - resource type: object - tcpSocket: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace properties: - host: + key: + description: The key of the secret to select + from. Must be a valid secret key. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean required: - - port + - key type: object + x-kubernetes-map-type: atomic type: object + required: + - name type: object - livenessProbe: + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: + configMapRef: + description: The ConfigMap to select from properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string - required: - - port + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - host: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port + optional: + description: Specify whether the Secret must be + defined + type: boolean type: object - timeoutSeconds: - format: int32 - type: integer + x-kubernetes-map-type: atomic type: object - name: - type: string - ports: - items: + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - type: array - drop: - items: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - volumeMounts: - items: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - javaOptions: - type: string - kubernetesMaster: - type: string - labels: - additionalProperties: - type: string - type: object - lifecycle: - properties: - postStart: + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: + description: Exec specifies the action to take. properties: command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
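# Illustrative example (hypothetical fragment, not part of the generated CRD):
# lifecycle hooks of the shape defined above, attached via the driver spec's
# own lifecycle field (visible in the old schema being replaced here). The
# postStart endpoint and preStop sleep duration are placeholders.
#
# spec:
#   driver:
#     lifecycle:
#       postStart:
#         httpGet:
#           path: /healthz
#           port: 8080
#       preStop:
#         exec:
#           command: ["/bin/sh", "-c", "sleep 5"]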
+ + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object httpGet: + description: HTTPGet specifies the http request to perform. properties: host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. items: + description: HTTPHeader describes a custom header + to be used in HTTP probes properties: name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: + description: The header field value type: string required: - name @@ -1109,49 +1843,211 @@ spec: type: object type: array path: + description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - preStop: - properties: - exec: + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object httpGet: + description: HTTPGet specifies the http request to perform. 
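To make the ports list-map semantics above concrete, a sketch of one entry (port name and number are placeholders): containerPort plus protocol form the strategic-merge key, so two entries may share a number only if their protocols differ.

ports:
  - name: spark-ui        # IANA_SVC_NAME, unique within the pod
    containerPort: 4040
    protocol: TCP         # defaults to TCP; part of the list-map key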
properties: host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. items: + description: HTTPHeader describes a custom header + to be used in HTTP probes properties: name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: + description: The header field value type: string required: - name @@ -1159,3348 +2055,9494 @@ spec: type: object type: array path: + description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podName: - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' - type: string - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: + resizePolicy: + description: Resources resize policy for the container. items: + description: ContainerResizePolicy represents resource + resize policy for the container. properties: - name: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string - value: + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. type: string required: - - name - - value + - resourceName + - restartPolicy type: object type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: - add: - items: - type: string - type: array - drop: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: - type: string + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
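A hedged sketch of the probe fields documented above, with invented endpoints; the grpc handler assumes the container actually serves the gRPC health-checking protocol, and terminationGracePeriodSeconds is the beta field called out in its own description:

livenessProbe:
  httpGet:
    path: /healthz        # hypothetical endpoint
    port: 8080
  initialDelaySeconds: 30
  periodSeconds: 10       # the documented default
  terminationGracePeriodSeconds: 30
readinessProbe:
  grpc:
    port: 9999            # hypothetical gRPC health port
  failureThreshold: 3     # the documented default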
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object type: object - privileged: - type: boolean - procMount: + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - serviceAccount: - type: string - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - shareProcessNamespace: - type: boolean - sidecars: - items: - properties: - args: - items: - type: string - type: array - command: - items: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. 
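The resources and restartPolicy fields above combine like this in a hypothetical init container (name and image invented); per the restartPolicy description, "Always" turns it into a sidecar-style container that keeps running alongside the main containers:

initContainers:
  - name: warm-cache
    image: busybox:1.36
    restartPolicy: Always
    resources:
      requests:
        cpu: 100m         # quantities must match the pattern above
        memory: 128Mi
      limits:
        cpu: 200m
        memory: 256Mi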
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string - type: array - env: - items: + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - name: + level: + description: Level is SELinux level label that applies + to the container. type: string - value: + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object required: - - name + - type type: object - type: array - envFrom: - items: + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
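Pulling the container-level SecurityContext fields above into one locked-down sketch; the UID is an assumption (185 is the user the stock Apache Spark images run as) and the remaining values are ones the schema explicitly allows:

securityContext:
  runAsNonRoot: true
  runAsUser: 185                   # assumption: the spark user in the image
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop: ["ALL"]
  seccompProfile:
    type: RuntimeDefault           # one of the three options listed above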
type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
type: string - required: - - port - type: object - tcpSocket: - properties: - host: + value: + description: The header field value type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array + - name + - value type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: + type: array + path: + description: Path to access on the HTTP server. 
type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. 
+ Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name type: object - securityContext: + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. 
properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean - capabilities: - properties: - add: - items: + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the driver. For instance, + GC settings or other logging. + type: string + kubernetesMaster: + description: |- + KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and + other Kubernetes resources. Default to https://kubernetes.default.svc. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added to the + pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
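At this point the schema moves from per-container fields back to pod-level driver settings (javaOptions, kubernetesMaster, labels, ...). Assuming, as the field ordering suggests, that these properties sit under spec.driver of a SparkApplication, a small fragment might read (values are placeholders):

driver:
  labels:
    team: data-platform
  javaOptions: "-XX:+UseG1GC"      # extra JVM options for the driver
  kubernetesMaster: https://kubernetes.default.svc   # the documented default, shown explicitly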
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: array - drop: - items: + value: + description: The header field value type: string - type: array - type: object - privileged: - type: boolean - procMount: + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. format: int64 type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + required: + - seconds type: object - startupProbe: + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: - exec: - properties: - command: - items: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 type: integer + required: + - seconds type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for the + pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory to + allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. 
+ This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podName: + description: |- + PodName is the name of the driver pod that the user creates. This is used for the + in-cluster client mode in which the user creates a client pod where the driver of + the user application runs. It's an error to set this field if Mode is not + in-cluster-client. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. 
+ type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - terminationMessagePolicy: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - tty: + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. 
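A short sketch of the PodSecurityContext fields above (the UID and GID are assumptions chosen to match the earlier container-level example):

podSecurityContext:
  runAsNonRoot: true
  runAsUser: 185
  fsGroup: 185                          # volume ownership follows the rules quoted above
  fsGroupChangePolicy: OnRootMismatch   # skip the recursive chown when ownership already matches
  seccompProfile:
    type: RuntimeDefault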
type: boolean - volumeDevices: + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the pods + objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will be + used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add to + the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object + description: Capability represent POSIX capabilities + type + type: string type: array - volumeMounts: + drop: + description: Removed capabilities items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object + description: Capability represent POSIX capabilities + type + type: string type: array - workingDir: - type: string - required: - - name type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. 
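The operator-specific types in this stretch (Port, SecretInfo, schedulerName) differ from their core-Kubernetes cousins; for instance, Port requires name and protocol. A hypothetical fragment, with the scheduler and secret names as assumptions:

schedulerName: volcano             # assumption: a batch scheduler installed in the cluster
ports:
  - name: metrics
    containerPort: 8090
    protocol: TCP                  # required here, unlike corev1.ContainerPort
secrets:
  - name: gcs-key                  # placeholder secret
    path: /mnt/secrets/gcs
    secretType: GCPServiceAccount  # tells the operator how to expose the secret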
+ type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - effect: + level: + description: Level is SELinux level label that applies + to the container. type: string - key: + role: + description: Role is a SELinux role label that applies + to the container. type: string - operator: + type: + description: Type is a SELinux type label that applies + to the container. type: string - tolerationSeconds: - format: int64 - type: integer - value: + user: + description: User is a SELinux user label that applies + to the container. type: string type: object - type: array - volumeMounts: - items: + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: - mountPath: - type: string - mountPropagation: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - name: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
type: string - readOnly: + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean - subPath: + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string - subPathExpr: - type: string - required: - - mountPath - - name type: object - type: array - type: object - dynamicAllocation: - properties: - enabled: - type: boolean - initialExecutors: - format: int32 - type: integer - maxExecutors: - format: int32 - type: integer - minExecutors: - format: int32 - type: integer - shuffleTrackingTimeout: - format: int64 - type: integer - type: object - executor: - properties: - affinity: + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: |- + ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + serviceLabels: + additionalProperties: + type: string + description: |- + ServiceLabels defines the labels to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that run + along side the main Spark container. + items: + description: A single application container that you want to + run within a pod. properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. type: string required: - - topologyKey + - fieldPath type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' type: string required: - - topologyKey + - resource type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey + x-kubernetes-map-type: atomic type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - deleteOnTermination: - type: boolean - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string + required: + - name type: object type: array - searches: + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: + description: EnvFromSource represents the source of a + set of ConfigMaps properties: - configMapKeyRef: + configMapRef: + description: The ConfigMap to select from properties: - key: - type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: + description: Specify whether the ConfigMap must + be defined type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource type: object - secretKeyRef: + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - key: - type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: + description: Specify whether the Secret must be + defined type: boolean - required: - - key type: object + x-kubernetes-map-type: atomic type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - configMapRef: + exec: + description: Exec specifies the action to take. properties: - name: - type: string - optional: - type: boolean + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array type: object - prefix: - type: string - secretRef: + httpGet: + description: HTTPGet specifies the http request + to perform. properties: - name: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string - optional: - type: boolean - type: object + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. 
+ properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - required: - - port - type: object - tcpSocket: - properties: - host: + value: + description: The header field value type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port + - name + - value type: object - type: object - type: object - livenessProbe: + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. format: int32 type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". 
+ type: string + required: + - containerPort type: object - name: - type: string - ports: - items: + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. format: int32 type: integer - name: - type: string - protocol: + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - - containerPort - - protocol + - port type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - - port + - name type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - devicePath: + level: + description: Level is SELinux level label that applies + to the container. type: string - name: + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. type: string - required: - - devicePath - - name type: object - type: array - volumeMounts: - items: + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: - mountPath: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - mountPropagation: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. 
+ RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - name: + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string - readOnly: - type: boolean - subPath: + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. type: string - subPathExpr: + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string - required: - - mountPath - - name type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - instances: - format: int32 - minimum: 1 - type: integer - javaOptions: - type: string - labels: - additionalProperties: - type: string - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be a non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - privileged: + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean - procMount: + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
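For orientation, a minimal sketch of how the probe schema above is exercised from a SparkApplication manifest. The application name, sidecar image, and health endpoint are hypothetical; the probe fields (httpGet, failureThreshold, periodSeconds) are the ones defined in this CRD:

apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-app-with-probe         # hypothetical name
spec:
  driver:
    sidecars:
    - name: metrics-proxy            # hypothetical sidecar container
      image: example.com/metrics-proxy:latest
      startupProbe:
        httpGet:
          path: /healthz             # assumed health endpoint
          port: 9090
        failureThreshold: 30         # tolerate up to 30 failed checks...
        periodSeconds: 10            # ...probed every 10s while starting up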
+ Defaults to File. + Cannot be updated. type: string - readOnlyRootFilesystem: + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. type: boolean - runAsGroup: + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed in ".spec.tolerations" + to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category.
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer - runAsNonRoot: + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in ".spec.volumes" + to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name type: object - serviceAccount: - type: string - shareProcessNamespace: - type: boolean - sidecars: + type: array + type: object + driverIngressOptions: + description: DriverIngressOptions allows configuring the Service and + the Ingress to expose ports inside the Spark driver + items: + description: DriverIngressConfiguration is for driver ingress specific + configuration parameters. + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key,value pairs + of annotations that might be added to the ingress object. + e.g. specify nginx as ingress.class + type: object + ingressTLS: + description: IngressTLS is useful if we need to declare SSL certificates + for the ingress object items: + description: IngressTLS describes the transport layer security + associated with an ingress. properties: - args: - items: - type: string - type: array - command: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified.
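As a sketch of the tolerations and volumeMounts blocks defined above (the taint key and volume name are hypothetical; the mounted volume must be declared under .spec.volumes):

spec:
  volumes:
  - name: scratch                    # hypothetical hostPath volume
    hostPath:
      path: /tmp/spark-scratch
  driver:
    tolerations:
    - key: dedicated                 # hypothetical taint on Spark nodes
      operator: Equal
      value: spark
      effect: NoSchedule
    volumeMounts:
    - name: scratch                  # must match a .spec.volumes entry
      mountPath: /tmp/scratch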
items: type: string type: array - env: + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + ingressURLFormat: + description: IngressURLFormat is the URL for the ingress. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs + of annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of labels + that might be added to the service object. + type: object + servicePort: + description: ServicePort allows configuring the port at service + level that might be different from the targetPort. + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP. + type: string + serviceType: + description: ServiceType allows configuring the type of the + service. Defaults to ClusterIP. + type: string + required: + - servicePort + - servicePortName + type: object + type: array + dynamicAllocation: + description: |- + DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes + scheduler backend since Spark 3.0. + properties: + enabled: + description: Enabled controls whether dynamic allocation is enabled + or not. + type: boolean + initialExecutors: + description: |- + InitialExecutors is the initial number of executors to request. If .spec.executor.instances + is also set, the initial number of executors is set to the bigger of that and this option. + format: int32 + type: integer + maxExecutors: + description: MaxExecutors is the upper bound for the number of + executors if dynamic allocation is enabled. + format: int32 + type: integer + minExecutors: + description: MinExecutors is the lower bound for the number of + executors if dynamic allocation is enabled. + format: int32 + type: integer + shuffleTrackingTimeout: + description: |- + ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding + shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled). + format: int64 + type: integer + type: object + executor: + description: Executor is the executor specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity settings + for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: - name: - type: string - value: - type: string - valueFrom: + preference: + description: A node selector term, associated with + the corresponding weight. properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
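Stepping back from the affinity schema for a moment: the driverIngressOptions block that closed above is easiest to read through an example. This is a sketch only; the URL format template and annotation values are assumptions, while servicePort and servicePortName are the two required fields. (The affinity schema continues below.)

spec:
  driverIngressOptions:
  - servicePort: 4040
    servicePortName: spark-ui        # required; Istio users may need an http- prefixed name
    serviceType: ClusterIP
    ingressURLFormat: "{{$appName}}.example.com"   # hypothetical template
    ingressAnnotations:
      kubernetes.io/ingress.class: nginx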
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object + - preference + - weight type: object type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - value: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
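Similarly, a minimal sketch of the dynamicAllocation block defined earlier; the sizing numbers are arbitrary, and shuffleTrackingTimeout is in milliseconds per the field description:

spec:
  dynamicAllocation:
    enabled: true
    initialExecutors: 2              # if .spec.executor.instances is also set, the bigger value wins
    minExecutors: 2
    maxExecutors: 10
    shuffleTrackingTimeout: 600000   # 10 minutes, in milliseconds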
+ items: type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - required: - - port - type: object - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty.
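To ground the executor affinity schema, a sketch of a hard node-affinity constraint; the instance-type label values are hypothetical:

spec:
  executor:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: node.kubernetes.io/instance-type
              operator: In
              values:                # hypothetical instance types
              - r5.2xlarge
              - r5.4xlarge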
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer - name: - type: string - protocol: - type: string required: - - containerPort - - protocol + - podAffinityTerm + - weight type: object type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements.
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
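A sketch of the preferred pod-affinity form above, co-locating executors near their driver; spark-role is the label Spark applies to driver pods, though treat the exact selector as an assumption:

spec:
  executor:
    affinity:
      podAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                spark-role: driver   # assumed label on driver pods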
+ items: type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
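And the anti-affinity counterpart, spreading executors across nodes via the required form and the topologyKey field just described (selector again assumed):

spec:
  executor:
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              spark-role: executor   # assumed label on executor pods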
type: string required: - - mountPath - - name + - topologyKey type: object type: array - workingDir: - type: string - required: - - name - type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name type: object - type: array - type: object - failureRetries: - format: int32 - type: integer - hadoopConf: - additionalProperties: - type: string - type: object - hadoopConfigMap: - type: string - image: - type: string - imagePullPolicy: - type: string - imagePullSecrets: - items: - type: string - type: array - mainApplicationFile: - type: string - mainClass: - type: string - memoryOverheadFactor: - type: string - mode: - enum: - - cluster - - client - type: string - monitoring: - properties: - exposeDriverMetrics: - type: boolean - exposeExecutorMetrics: - type: boolean - metricsProperties: - type: string - metricsPropertiesFile: + type: object + annotations: + additionalProperties: type: string - prometheus: + description: Annotations are the Kubernetes annotations to be + added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to which + the named objects should be mounted to. properties: - configFile: - type: string - configuration: - type: string - jmxExporterJar: + name: type: string - port: - format: int32 - maximum: 49151 - minimum: 1024 - type: integer - portName: + path: type: string required: - - jmxExporterJar + - name + - path type: object - required: - - exposeDriverMetrics - - exposeExecutorMetrics - type: object - nodeSelector: - additionalProperties: + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional type: string - type: object - proxyUser: - type: string - pythonVersion: - enum: - - "2" - - "3" - type: string - restartPolicy: - properties: - onFailureRetries: - format: int32 - minimum: 0 - type: integer - onFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - onSubmissionFailureRetries: - format: int32 - minimum: 0 - type: integer - onSubmissionFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - type: - enum: - - Never - - Always - - OnFailure - type: string - type: object - retryInterval: - format: int64 - type: integer - sparkConf: - additionalProperties: + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the executors. + Maps to `spark.kubernetes.executor.request.cores` that is available since Spark 2.4. 
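The CPU fields defined here map onto Spark configuration as follows; a sketch with arbitrary sizing (coreRequest and coreLimit are strings, cores is an integer with minimum 1):

spec:
  executor:
    instances: 4
    cores: 2               # maps to spark.executor.cores
    coreRequest: "1500m"   # maps to spark.kubernetes.executor.request.cores
    coreLimit: "2000m"     # hard CPU limit on the executor pod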
type: string - type: object - sparkConfigMap: - type: string - sparkUIOptions: - properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressAnnotations: - additionalProperties: - type: string - type: object - ingressTLS: - items: - properties: - hosts: - items: - type: string - type: array - secretName: - type: string - type: object - type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string - type: object - driverIngressOptions: - items: + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + deleteOnTermination: + description: |- + DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination. + Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0. + type: boolean + dnsConfig: + description: DnsConfig dns settings for the pod, following the + Kubernetes specifications. properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressURLFormat: - type: string - ingressAnnotations: - additionalProperties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: type: string - type: object - ingressTLS: + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. properties: - hosts: - items: - type: string - type: array - secretName: + name: + description: Required. 
+ type: string + value: type: string type: object type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string - type: object - type: array - sparkVersion: - type: string - timeToLiveSeconds: - format: int64 - type: integer - type: - enum: - - Java - - Python - - Scala - - R - type: string - volumes: - items: - properties: - awsElasticBlockStore: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - azureDisk: - properties: - cachingMode: - type: string - diskName: - type: string - diskURI: - type: string - fsType: - type: string - kind: - type: string - readOnly: - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - properties: - readOnly: - type: boolean - secretName: - type: string - shareName: - type: string - required: - - secretName - - shareName - type: object - cephfs: - properties: - monitors: - items: - type: string - type: array - path: - type: string - readOnly: - type: boolean - secretFile: - type: string - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - monitors - type: object - cinder: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeID: - type: string - required: - - volumeID - type: object - configMap: - properties: - defaultMode: - format: int32 - type: integer - items: - items: + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add to the + pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: key: + description: The key to select. type: string - mode: - format: int32 - type: integer - path: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
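#
# NOTE (editorial): an illustrative use of the dnsConfig and env fields described above,
# assuming a v1beta2 SparkApplication pod spec; the nameserver, search domain, and
# variable values are placeholders.
#
#   dnsConfig:
#     nameservers: ["10.0.0.10"]
#     searches: ["svc.cluster.local"]
#     options:
#       - name: ndots
#         value: "2"
#   env:
#     - name: APP_MODE        # hypothetical variable
#       value: "production"
#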
type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean required: - key - - path type: object - type: array - name: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. 
+ properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request for + driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the Kubernetes + specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: type: string - optional: - type: boolean - type: object - csi: - properties: - driver: + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host networking + for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides Spec.Image + if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - fsType: + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - nodePublishSecretRef: + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. 
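#
# NOTE (editorial): the schema above marks envVars and envSecretKeyRefs as deprecated in
# favor of env. A hedged migration sketch (LOG_LEVEL and db-secret are hypothetical):
#
#   # Deprecated form:
#   envVars:
#     LOG_LEVEL: "info"
#   envSecretKeyRefs:
#     DB_PASSWORD:
#       name: db-secret
#       key: password
#
#   # Preferred form:
#   env:
#     - name: LOG_LEVEL
#       value: "info"
#     - name: DB_PASSWORD
#       valueFrom:
#         secretKeyRef:
#           name: db-secret
#           key: password
#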
properties: name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name type: object - readOnly: - type: boolean - volumeAttributes: - additionalProperties: - type: string - type: object - required: - - driver - type: object - downwardAPI: - properties: - defaultMode: - format: int32 - type: integer + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. items: - items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - fieldRef: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. properties: - apiVersion: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - fieldPath: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - fieldPath + - port type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: - containerName: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string - divisor: + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - resource: - type: string required: - - resource + - port type: object - required: - - path type: object - type: array - type: object - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - spec: - properties: - accessModes: - items: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string - type: array - resources: - properties: - requests: + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - storage: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: object - type: object - storageClassName: - type: string - type: object - type: object - type: object - fc: - properties: - fsType: - type: string - lun: - format: int32 - type: integer - readOnly: - type: boolean - targetWWNs: - items: - type: string - type: array - wwids: - items: - type: string - type: array - type: object - flexVolume: - properties: - driver: - type: string - fsType: - type: string - options: - additionalProperties: - type: string - type: object - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - required: - - driver - type: object - flocker: - properties: - datasetName: - type: string - datasetUUID: - type: string - type: object - gcePersistentDisk: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - pdName: - type: string - readOnly: - type: boolean - required: - - pdName - type: object - gitRepo: - properties: - directory: - type: string - repository: - type: string - revision: - type: string - required: - - repository - type: object - glusterfs: - properties: - endpoints: - type: string - path: - type: string - readOnly: - type: boolean - required: - - endpoints - - path - type: object - hostPath: - properties: - path: - type: string - type: - type: string - required: - - path - type: object - iscsi: - properties: - chapAuthDiscovery: - type: boolean - chapAuthSession: - type: boolean - fsType: - type: string - initiatorName: - type: string - iqn: - type: string - iscsiInterface: - type: string - lun: - format: int32 - type: integer - portals: - items: - type: string - type: array - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - targetPortal: - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - type: string - nfs: - properties: - path: - type: string - readOnly: - type: boolean - server: - type: string - required: 
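#
# NOTE (editorial): example lifecycle hooks matching the handler schema above; the
# command and endpoint are hypothetical, and the tcpSocket handler is omitted because
# the schema itself marks it deprecated and non-functional.
#
#   lifecycle:
#     postStart:
#       exec:
#         command: ["/bin/sh", "-c", "echo started > /tmp/started"]
#     preStop:
#       httpGet:
#         path: /shutdown
#         port: 8080
#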
- - path - - server - type: object - persistentVolumeClaim: - properties: - claimName: - type: string - readOnly: - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - properties: - fsType: - type: string - pdID: - type: string - required: - - pdID - type: object - portworxVolume: - properties: - fsType: - type: string - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - projected: - properties: - defaultMode: - format: int32 - type: integer - sources: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path + value: + description: The header field value + type: string + required: + - name + - value type: object type: array - name: + path: + description: Path to access on the HTTP server. type: string - optional: - type: boolean - type: object - downwardAPI: - properties: - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - type: boolean + required: + - port type: object - serviceAccountToken: + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. properties: - audience: - type: string - expirationSeconds: + seconds: + description: Seconds is the number of seconds + to sleep. format: int64 type: integer - path: + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true required: - - path + - port type: object type: object - type: array - required: - - sources - type: object - quobyte: - properties: - group: - type: string - readOnly: - type: boolean - registry: - type: string - tenant: - type: string - user: - type: string - volume: - type: string - required: - - registry - - volume - type: object - rbd: - properties: - fsType: - type: string - image: - type: string - keyring: - type: string - monitors: - items: - type: string - type: array - pool: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - image - - monitors - type: object - scaleIO: - properties: - fsType: - type: string - gateway: - type: string - protectionDomain: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - sslEnabled: - type: boolean - storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - properties: - defaultMode: - format: int32 - type: integer - items: - items: + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: - key: - type: string - mode: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. format: int32 type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - key - - path + - port type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeName: - type: string - volumeNamespace: - type: string - type: object - vsphereVolume: - properties: - fsType: + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. 
Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. 
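#
# NOTE (editorial): a probe sketch using the fields documented above; the path and port
# are hypothetical, and only the values marked as such are schema defaults.
#
#   readinessProbe:
#     httpGet:
#       path: /healthz
#       port: 8080
#     initialDelaySeconds: 5
#     periodSeconds: 10     # schema default
#     failureThreshold: 3   # schema default
#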
+ type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
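#
# NOTE (editorial): a resources sketch for an init container per the schema above;
# quantities are illustrative and must match the documented quantity pattern.
#
#   resources:
#     requests:
#       cpu: "250m"
#       memory: "512Mi"
#     limits:
#       memory: "512Mi"
#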
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. 
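#
# NOTE (editorial): a hardening-oriented securityContext sketch built only from fields
# in the schema above; the UID is illustrative (185 is the spark user in the upstream
# Spark images), not a requirement of this schema.
#
#   securityContext:
#     runAsNonRoot: true
#     runAsUser: 185
#     allowPrivilegeEscalation: false
#     readOnlyRootFilesystem: true
#     capabilities:
#       drop: ["ALL"]
#     seccompProfile:
#       type: RuntimeDefault
#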
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. 
+ Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. 
+ properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + instances: + description: Instances is the number of executor instances. + format: int32 + minimum: 1 + type: integer + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the executors. For instance, + GC settings or other logging. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added to the + pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for the + pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory to + allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. 
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. 
If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the pods + objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will be + used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add to + the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that run + along side the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
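A minimal sketch combining the container securityContext fields documented above (the UID is illustrative; 185 is merely a common choice in Spark images):

# Illustrative container securityContext; values are hypothetical.
securityContext:
  runAsNonRoot: true
  runAsUser: 185                   # hypothetical non-root UID
  allowPrivilegeEscalation: false
  capabilities:
    drop:
    - ALL
  seccompProfile:
    type: RuntimeDefault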
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
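Putting the probe fields above together, a hedged startupProbe sketch for a slow-starting container (endpoint and numbers are illustrative):

# Illustrative startupProbe; path and port are hypothetical.
startupProbe:
  httpGet:
    path: /healthz
    port: 8080
  periodSeconds: 10
  failureThreshold: 30   # tolerates up to 30 * 10s = 300s of startup time
  timeoutSeconds: 1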
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory.
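A short sketch of the volumeMounts fields above, mounting a single file from a volume via subPath (volume name and paths are hypothetical):

# Illustrative volumeMount; volume and paths are hypothetical.
volumeMounts:
- name: app-config               # must match a volume name in the pod
  mountPath: /etc/app/app.conf
  subPath: app.conf              # mounts only this file from the volume's root
  readOnly: true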
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed in ".spec.tolerations" + to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in ".spec.volumes" + to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + failureRetries: + description: |- + FailureRetries is the number of times to retry a failed application before giving up. 
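For reference, a hedged tolerations sketch exercising the fields above (the taint keys and value are hypothetical):

# Illustrative tolerations; taint keys and value are hypothetical.
tolerations:
- key: dedicated
  operator: Equal
  value: spark
  effect: NoSchedule
- key: example.com/maintenance   # hypothetical NoExecute taint
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300         # evicted 5 minutes after the taint appears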
+ This is best effort and actual retry attempts can be >= the value specified. + format: int32 + type: integer + hadoopConf: + additionalProperties: + type: string + description: |- + HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option + in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop + configuration properties. + type: object + hadoopConfigMap: + description: |- + HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. + The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + image: + description: |- + Image is the container image for the driver, executor, and init-container. Any custom container images for the + driver, executor, or init-container take precedence over this. + type: string + imagePullPolicy: + description: ImagePullPolicy is the image pull policy for the driver, + executor, and init-container. + type: string + imagePullSecrets: + description: ImagePullSecrets is the list of image-pull secrets. + items: + type: string + type: array + mainApplicationFile: + description: MainFile is the path to a bundled JAR, Python, or R file + of the application. + type: string + mainClass: + description: |- + MainClass is the fully-qualified main class of the Spark application. + This only applies to Java/Scala Spark applications. + type: string + memoryOverheadFactor: + description: |- + This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. + For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will + be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set. + type: string + mode: + description: Mode is the deployment mode of the Spark application. + enum: + - cluster + - client + type: string + monitoring: + description: Monitoring configures how monitoring is handled. + properties: + exposeDriverMetrics: + description: ExposeDriverMetrics specifies whether to expose metrics + on the driver. + type: boolean + exposeExecutorMetrics: + description: ExposeExecutorMetrics specifies whether to expose + metrics on the executors. + type: boolean + metricsProperties: + description: |- + MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system. + If not specified, the content in spark-docker/conf/metrics.properties will be used. + type: string + metricsPropertiesFile: + description: |- + MetricsPropertiesFile is the container local path of file metrics.properties for configuring + the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used. + type: string + prometheus: + description: Prometheus is for configuring the Prometheus JMX + exporter. + properties: + configFile: + description: |- + ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. + ConfigFile takes precedence over Configuration, which is shown below. + type: string + configuration: + description: |- + Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. + If not specified, the content in spark-docker/conf/prometheus.yaml will be used. + Configuration has no effect if ConfigFile is set. + type: string + jmxExporterJar: + description: JmxExporterJar is the path to the Prometheus + JMX exporter jar in the container.
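To illustrate the application-level fields above, a minimal, hedged SparkApplication spec fragment; the image, endpoint, and jar path are hypothetical. Note how the hadoopConf entry gains the "spark.hadoop." prefix described above:

# Illustrative spec fragment; image and endpoint are hypothetical.
spec:
  type: Scala
  mode: cluster
  image: docker.io/example/spark:3.5.0
  mainClass: org.apache.spark.examples.SparkPi
  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar
  hadoopConf:
    fs.s3a.endpoint: https://s3.example.com   # submitted as spark.hadoop.fs.s3a.endpoint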
+ type: string + port: + description: |- + Port is the port of the HTTP server run by the Prometheus JMX exporter. + If not specified, 8090 will be used as the default. + format: int32 + maximum: 49151 + minimum: 1024 + type: integer + portName: + description: |- + PortName is the port name of the Prometheus JMX exporter port. + If not specified, jmx-exporter will be used as the default. + type: string + required: + - jmxExporterJar + type: object + required: + - exposeDriverMetrics + - exposeExecutorMetrics + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). + This field will be deprecated in future versions (at SparkApplicationSpec level). + type: object + proxyUser: + description: |- + ProxyUser specifies the user to impersonate when submitting the application. + It maps to the command-line flag "--proxy-user" in spark-submit. + type: string + pythonVersion: + description: |- + This sets the major Python version of the docker + image used to run the driver and executor containers. Can either be 2 or 3, default 2. + enum: + - "2" + - "3" + type: string + restartPolicy: + description: RestartPolicy defines the policy on if and in which conditions + the controller should restart an application. + properties: + onFailureRetries: + description: OnFailureRetries is the number of times to retry running + an application before giving up. + format: int32 + minimum: 0 + type: integer + onFailureRetryInterval: + description: OnFailureRetryInterval is the interval in seconds + between retries on failed runs. + format: int64 + minimum: 1 + type: integer + onSubmissionFailureRetries: + description: |- + OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. + This is best effort and actual retry attempts can be >= the value specified due to caching. + These are required if RestartPolicy is OnFailure. + format: int32 + minimum: 0 + type: integer + onSubmissionFailureRetryInterval: + description: OnSubmissionFailureRetryInterval is the interval + in seconds between retries on failed submissions. + format: int64 + minimum: 1 + type: integer + type: + description: Type specifies the RestartPolicyType. + enum: + - Never + - Always + - OnFailure + type: string + type: object + retryInterval: + description: RetryInterval is the unit of intervals in seconds between + submission retries. + format: int64 + type: integer + sparkConf: + additionalProperties: + type: string + description: |- + SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in + spark-submit. + type: object + sparkConfigMap: + description: |- + SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties. + The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + sparkUIOptions: + description: SparkUIOptions allows configuring the Service and the + Ingress to expose the Spark UI + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key,value pairs of + annotations that might be added to the ingress object, e.g.
+ specify nginx as ingress.class + type: object + ingressTLS: + description: IngressTLS is useful if we need to declare SSL certificates + for the ingress object + items: + description: IngressTLS describes the transport layer security + associated with an ingress. + properties: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs of + annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of labels + that might be added to the service object. + type: object + servicePort: + description: |- + ServicePort allows configuring the port at service level that might be different from the targetPort. + TargetPort should be the same as the one defined in spark.ui.port + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP. + Defaults to spark-driver-ui-port. + type: string + serviceType: + description: ServiceType allows configuring the type of the service. + Defaults to ClusterIP. + type: string + type: object + sparkVersion: + description: SparkVersion is the version of Spark the application + uses. + type: string + timeToLiveSeconds: + description: |- + TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication + after its termination. + The SparkApplication object will be garbage collected if the current time is more than the + TimeToLiveSeconds since its termination. + format: int64 + type: integer + type: + description: Type tells the type of the Spark application. + enum: + - Java + - Python + - Scala + - R + type: string + volumes: + description: Volumes is the list of Kubernetes volumes that can be + mounted by the driver and/or executors. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
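A hedged sparkUIOptions sketch using the fields above (host, secret name, and annotation value are hypothetical):

# Illustrative sparkUIOptions; host and secret names are hypothetical.
sparkUIOptions:
  serviceType: ClusterIP
  servicePort: 4040                # targetPort should match spark.ui.port
  servicePortName: spark-driver-ui-port
  ingressAnnotations:
    kubernetes.io/ingress.class: nginx   # e.g. select the nginx ingress class
  ingressTLS:
  - hosts:
    - spark.example.com
    secretName: spark-ui-tls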
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
+ items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specifies whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting.
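A minimal sketch of the configMap volume fields above; note that defaultMode is the decimal form of octal 0644, as the schema notes (the ConfigMap name and key are hypothetical):

# Illustrative configMap volume; name and key are hypothetical.
volumes:
- name: spark-conf
  configMap:
    name: spark-conf-files
    defaultMode: 420           # decimal for octal 0644
    items:
    - key: log4j.properties
      path: log4j.properties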
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
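Per the emptyDir fields above, a hedged scratch-volume sketch (the size is illustrative):

# Illustrative emptyDir scratch volume.
volumes:
- name: scratch
  emptyDir:
    medium: Memory             # tmpfs-backed; usage counts against container memory limits
    sizeLimit: 2Gi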
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
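Tying the ephemeral fields above together, a hedged inline-PVC sketch (the StorageClass name is hypothetical):

# Illustrative ephemeral volume; storageClassName is hypothetical.
volumes:
- name: cache
  ephemeral:
    volumeClaimTemplate:
      spec:
        accessModes:
        - ReadWriteOnce
        storageClassName: standard
        resources:
          requests:
            storage: 10Gi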
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: type: string - storagePolicyID: + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: type: string - storagePolicyName: + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: type: string - volumePath: + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - required: - - driver - - executor - - sparkVersion - - type - type: object - status: - properties: - applicationState: - properties: - errorMessage: - type: string - state: + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
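For orientation, volume sources like the hostPath schema above are consumed through a SparkApplication's spec.volumes together with a matching volumeMounts entry on the driver or executor pod spec (applying the mounts typically requires the operator's mutating webhook to be enabled). A minimal sketch, with every name and path hypothetical:

    apiVersion: sparkoperator.k8s.io/v1beta2
    kind: SparkApplication
    metadata:
      name: spark-pi                  # hypothetical application name
    spec:
      volumes:
      - name: scratch                 # DNS_LABEL, unique within the pod
        hostPath:
          path: /tmp/spark            # pre-existing directory on the host node
          type: Directory
      driver:
        volumeMounts:
        - name: scratch
          mountPath: /tmp/spark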
+ type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
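A persistentVolumeClaim source, by contrast, only names a claim that must already exist in the application's namespace; a short sketch under the same caveats (claim and mount names are illustrative):

    spec:
      volumes:
      - name: checkpoints
        persistentVolumeClaim:
          claimName: spark-checkpoint-pvc   # pre-existing claim in the same namespace
      executor:
        volumeMounts:
        - name: checkpoints
          mountPath: /checkpoint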
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. 
If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
+ If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. 
A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the host
+ that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references an already
+ created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
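Because mode bits accept octal or decimal in YAML but only decimal in JSON, 0440 (octal) and 288 (decimal) denote the same permissions. An illustrative secret volume using defaultMode (the secret name is an assumption):

    spec:
      volumes:
      - name: credentials
        secret:
          secretName: spark-credentials   # hypothetical Secret
          defaultMode: 0440               # write 288 instead when authoring JSON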
+ format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. 
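For the items field of the secret source shown earlier, a short illustrative mapping when only selected keys should be materialized (key and path are assumptions):

    secret:
      secretName: spark-credentials
      items:
      - key: gcs-key.json
        path: secrets/gcs-key.json   # relative; may not be absolute or contain '..'
        mode: 0400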
+ type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object required: - - state - type: object - driverInfo: - properties: - podName: - type: string - webUIAddress: - type: string - webUIIngressAddress: - type: string - webUIIngressName: - type: string - webUIPort: - format: int32 - type: integer - webUIServiceName: - type: string + - name type: object - executionAttempts: - format: int32 - type: integer - executorState: - additionalProperties: + type: array + required: + - driver + - executor + - sparkVersion + - type + type: object + status: + description: SparkApplicationStatus defines the observed state of SparkApplication + properties: + applicationState: + description: AppState tells the overall application state. + properties: + errorMessage: type: string - type: object - lastSubmissionAttemptTime: - format: date-time - nullable: true - type: string - sparkApplicationId: - type: string - submissionAttempts: - format: int32 - type: integer - submissionID: - type: string - terminationTime: - format: date-time - nullable: true + state: + description: ApplicationStateType represents the type of the current + state of an application. + type: string + required: + - state + type: object + driverInfo: + description: DriverInfo has information about the driver. + properties: + podName: + type: string + webUIAddress: + description: UI Details for the UI created via ClusterIP service + accessible from within the cluster. + type: string + webUIIngressAddress: + type: string + webUIIngressName: + description: Ingress Details if an ingress for the UI was created. + type: string + webUIPort: + format: int32 + type: integer + webUIServiceName: + type: string + type: object + executionAttempts: + description: |- + ExecutionAttempts is the total number of attempts to run a submitted application to completion. + Incremented upon each attempted run of the application and reset upon invalidation. + format: int32 + type: integer + executorState: + additionalProperties: + description: ExecutorState tells the current state of an executor. type: string - required: - - driverInfo - type: object - required: - - metadata - - spec - type: object -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] + description: ExecutorState records the state of executors by executor + Pod names. + type: object + lastSubmissionAttemptTime: + description: LastSubmissionAttemptTime is the time for the last application + submission attempt. 
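The status block defined here is written by the operator rather than by users; a completed run might surface roughly the following shape (all values hypothetical):

    status:
      applicationState:
        state: COMPLETED
      driverInfo:
        podName: spark-pi-driver
        webUIPort: 4040
        webUIServiceName: spark-pi-ui-svc
      executorState:
        spark-pi-exec-1: COMPLETED
      executionAttempts: 1
      submissionAttempts: 1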
+ format: date-time
+ nullable: true
+ type: string
+ sparkApplicationId:
+ description: SparkApplicationID is set by the spark-distribution (via
+ spark.app.id config) on the driver and executor pods
+ type: string
+ submissionAttempts:
+ description: |-
+ SubmissionAttempts is the total number of attempts to submit an application to run.
+ Incremented upon each attempted submission of the application and reset upon invalidation and rerun.
+ format: int32
+ type: integer
+ submissionID:
+ description: SubmissionID is a unique ID of the current submission
+ of the application.
+ type: string
+ terminationTime:
+ description: CompletionTime is the time when the application runs
+ to completion if it does.
+ format: date-time
+ nullable: true
+ type: string
+ required:
+ - driverInfo
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/charts/spark-operator-chart/templates/_helpers.tpl b/charts/spark-operator-chart/templates/_helpers.tpl
index 8e884ee9d..82845b4e4 100644
--- a/charts/spark-operator-chart/templates/_helpers.tpl
+++ b/charts/spark-operator-chart/templates/_helpers.tpl
@@ -1,3 +1,19 @@
+{{/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
@@ -37,13 +53,13 @@ Common labels
{{- define "spark-operator.labels" -}}
helm.sh/chart: {{ include "spark-operator.chart" . }}
{{ include "spark-operator.selectorLabels" . }}
-{{- if .Values.commonLabels }}
-{{ toYaml .Values.commonLabels }}
-{{- end }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- with .Values.commonLabels }}
+{{ toYaml . }}
+{{- end }}
{{- end }}

{{/*
@@ -55,25 +71,8 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
-Create the name of the service account to be used by the operator
+Spark Operator image
*/}}
-{{- define "spark-operator.serviceAccountName" -}}
-{{- if .Values.serviceAccounts.sparkoperator.create -}}
-{{ default (include "spark-operator.fullname" .)
.Values.serviceAccounts.sparkoperator.name }} -{{- else -}} -{{ default "default" .Values.serviceAccounts.sparkoperator.name }} +{{- define "spark-operator.image" -}} +{{ printf "%s/%s:%s" .Values.image.registry .Values.image.repository (.Values.image.tag | default .Chart.AppVersion) }} {{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to be used by spark apps -*/}} -{{- define "spark.serviceAccountName" -}} -{{- if .Values.serviceAccounts.spark.create -}} -{{- $sparkServiceaccount := printf "%s-%s" .Release.Name "spark" -}} - {{ default $sparkServiceaccount .Values.serviceAccounts.spark.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.spark.name }} -{{- end -}} -{{- end -}} - diff --git a/charts/spark-operator-chart/templates/controller/_helpers.tpl b/charts/spark-operator-chart/templates/controller/_helpers.tpl new file mode 100644 index 000000000..e5b9457b2 --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/_helpers.tpl @@ -0,0 +1,70 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Create the name of controller component +*/}} +{{- define "spark-operator.controller.name" -}} +{{- include "spark-operator.fullname" . }}-controller +{{- end -}} + +{{/* +Common labels for the controller +*/}} +{{- define "spark-operator.controller.labels" -}} +{{ include "spark-operator.labels" . }} +app.kubernetes.io/component: controller +{{- end -}} + +{{/* +Selector labels for the controller +*/}} +{{- define "spark-operator.controller.selectorLabels" -}} +{{ include "spark-operator.selectorLabels" . }} +app.kubernetes.io/component: controller +{{- end -}} + +{{/* +Create the name of the service account to be used by the controller +*/}} +{{- define "spark-operator.controller.serviceAccountName" -}} +{{- if .Values.controller.serviceAccount.create -}} +{{ .Values.controller.serviceAccount.name | default (include "spark-operator.controller.name" .) }} +{{- else -}} +{{ .Values.controller.serviceAccount.name | default "default" }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the deployment to be used by controller +*/}} +{{- define "spark-operator.controller.deploymentName" -}} +{{ include "spark-operator.controller.name" . }} +{{- end -}} + +{{/* +Create the name of the lease resource to be used by leader election +*/}} +{{- define "spark-operator.controller.leaderElectionName" -}} +{{ include "spark-operator.controller.name" . }}-lock +{{- end -}} + +{{/* +Create the name of the pod disruption budget to be used by controller +*/}} +{{- define "spark-operator.controller.podDisruptionBudgetName" -}} +{{ include "spark-operator.controller.name" . }}-pdb +{{- end -}} diff --git a/charts/spark-operator-chart/templates/controller/deployment.yaml b/charts/spark-operator-chart/templates/controller/deployment.yaml new file mode 100644 index 000000000..02f9c2c90 --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/deployment.yaml @@ -0,0 +1,162 @@ +{{/* +Copyright 2024 The Kubeflow authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "spark-operator.controller.deploymentName" . }} + labels: + {{- include "spark-operator.controller.labels" . | nindent 4 }} +spec: + {{- with .Values.controller.replicas }} + replicas: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "spark-operator.controller.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "spark-operator.controller.selectorLabels" . | nindent 8 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.controller.annotations .Values.prometheus.metrics.enable }} + annotations: + {{- if .Values.prometheus.metrics.enable }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.prometheus.metrics.port | quote }} + prometheus.io/path: {{ .Values.prometheus.metrics.endpoint }} + {{- end }} + {{- with .Values.controller.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + spec: + containers: + - name: spark-operator-controller + image: {{ include "spark-operator.image" . }} + {{- with .Values.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + args: + - controller + - start + {{- with .Values.controller.logLevel }} + - --zap-log-level={{ . }} + {{- end }} + {{- with .Values.spark.jobNamespaces }} + - --namespaces={{ . | join "," }} + {{- end }} + - --controller-threads={{ .Values.controller.workers }} + {{- with .Values.controller.uiService.enable }} + - --enable-ui-service=true + {{- end }} + {{- if .Values.controller.uiIngress.enable }} + {{- with .Values.controller.uiIngress.urlFormat }} + - --ingress-url-format={{ . }} + {{- end }} + {{- end }} + {{- with .Values.controller.batchScheduler.enable }} + - --enable-batch-scheduler=true + {{- end }} + {{- if .Values.prometheus.metrics.enable }} + - --enable-metrics=true + - --metrics-bind-address=:{{ .Values.prometheus.metrics.port }} + - --metrics-endpoint={{ .Values.prometheus.metrics.endpoint }} + - --metrics-prefix={{ .Values.prometheus.metrics.prefix }} + - --metrics-labels=app_type + {{- end }} + - --leader-election=true + - --leader-election-lock-name={{ include "spark-operator.controller.leaderElectionName" . }} + - --leader-election-lock-namespace={{ .Release.Namespace }} + {{- if .Values.prometheus.metrics.enable }} + ports: + - name: {{ .Values.prometheus.metrics.portName | quote }} + containerPort: {{ .Values.prometheus.metrics.port }} + {{- end }} + {{- with .Values.controller.env }} + env: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.envFrom }} + envFrom: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.resources }} + resources: + {{- toYaml . 
| nindent 10 }} + {{- end }} + livenessProbe: + httpGet: + port: 8081 + scheme: HTTP + path: /healthz + readinessProbe: + httpGet: + port: 8081 + scheme: HTTP + path: /readyz + {{- with .Values.controller.securityContext }} + securityContext: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.controller.sidecars }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.controller.volumes }} + volumes: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.controller.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + serviceAccountName: {{ include "spark-operator.controller.serviceAccountName" . }} + {{- with .Values.controller.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + {{- if le (int .Values.controller.replicas) 1 }} + {{- fail "controller.replicas must be greater than 1 to enable topology spread constraints for controller pods"}} + {{- end }} + {{- $selectorLabels := include "spark-operator.controller.selectorLabels" . | fromYaml }} + {{- $labelSelectorDict := dict "labelSelector" ( dict "matchLabels" $selectorLabels ) }} + topologySpreadConstraints: + {{- range .Values.controller.topologySpreadConstraints }} + - {{ mergeOverwrite . $labelSelectorDict | toYaml | nindent 8 | trim }} + {{- end }} + {{- end }} diff --git a/charts/spark-operator-chart/templates/controller/poddisruptionbudget.yaml b/charts/spark-operator-chart/templates/controller/poddisruptionbudget.yaml new file mode 100644 index 000000000..38c748d6f --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/poddisruptionbudget.yaml @@ -0,0 +1,34 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.podDisruptionBudget.enable }} +{{- if le (int .Values.controller.replicas) 1 }} +{{- fail "controller.replicas must be greater than 1 to enable pod disruption budget for controller" }} +{{- end -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "spark-operator.controller.podDisruptionBudgetName" . }} + labels: + {{- include "spark-operator.controller.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "spark-operator.controller.selectorLabels" . | nindent 6 }} + {{- with .Values.controller.podDisruptionBudget.minAvailable }} + minAvailable: {{ . 
}} + {{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/controller/rbac.yaml b/charts/spark-operator-chart/templates/controller/rbac.yaml new file mode 100644 index 000000000..472d0fcc7 --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/rbac.yaml @@ -0,0 +1,201 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "spark-operator.controller.name" . }} + labels: + {{- include "spark-operator.controller.labels" . | nindent 4 }} + {{- with .Values.controller.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - create + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - events + verbs: + - create + - update + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + - scheduledsparkapplications + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/status + - scheduledsparkapplications/status + verbs: + - get + - update + - patch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/finalizers + - scheduledsparkapplications/finalizers + verbs: + - update +{{- if .Values.controller.batchScheduler.enable }} +{{/* required for the `volcano` batch scheduler */}} +- apiGroups: + - scheduling.incubator.k8s.io + - scheduling.sigs.dev + - scheduling.volcano.sh + resources: + - podgroups + verbs: + - "*" +{{- end }} +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "spark-operator.controller.name" . }} + labels: + {{- include "spark-operator.controller.labels" . | nindent 4 }} + {{- with .Values.controller.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "spark-operator.controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "spark-operator.controller.name" . }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "spark-operator.controller.name" . }} + labels: + {{- include "spark-operator.controller.labels" . 
| nindent 4 }} + {{- with .Values.controller.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ include "spark-operator.controller.leaderElectionName" . }} + verbs: + - get + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "spark-operator.controller.name" . }} + labels: + {{- include "spark-operator.controller.labels" . | nindent 4 }} + {{- with .Values.controller.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "spark-operator.controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "spark-operator.controller.name" . }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/controller/serviceaccount.yaml b/charts/spark-operator-chart/templates/controller/serviceaccount.yaml new file mode 100644 index 000000000..126e4245c --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/serviceaccount.yaml @@ -0,0 +1,28 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "spark-operator.controller.serviceAccountName" . }} + labels: + {{- include "spark-operator.controller.labels" . | nindent 4 }} + {{- with .Values.controller.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml deleted file mode 100644 index cf12fb2e8..000000000 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ /dev/null @@ -1,140 +0,0 @@ -# If the admission webhook is enabled, then a post-install step is required -# to generate and install the secret in the operator namespace. - -# In the post-install hook, the token corresponding to the operator service account -# is used to authenticate with the Kubernetes API server to install the secret bundle. -{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "spark-operator.fullname" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "spark-operator.selectorLabels" . 
| nindent 6 }} - strategy: - type: Recreate - template: - metadata: - {{- if or .Values.podAnnotations .Values.metrics.enable }} - annotations: - {{- if .Values.metrics.enable }} - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.port }}" - prometheus.io/path: {{ .Values.metrics.endpoint }} - {{- end }} - {{- if .Values.podAnnotations }} - {{- toYaml .Values.podAnnotations | trim | nindent 8 }} - {{- end }} - {{- end }} - labels: - {{- include "spark-operator.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} - {{- toYaml . | trim | nindent 8 }} - {{- end }} - spec: - serviceAccountName: {{ include "spark-operator.serviceAccountName" . }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if gt (int .Values.replicaCount) 1 }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - {{- end }} - envFrom: - {{- toYaml .Values.envFrom | nindent 10 }} - securityContext: - {{- toYaml .Values.securityContext | nindent 10 }} - {{- if or .Values.metrics.enable .Values.webhook.enable }} - ports: - {{ if .Values.metrics.enable -}} - - name: {{ .Values.metrics.portName | quote }} - containerPort: {{ .Values.metrics.port }} - {{- end }} - {{ if .Values.webhook.enable -}} - - name: {{ .Values.webhook.portName | quote }} - containerPort: {{ .Values.webhook.port }} - {{- end }} - {{ end -}} - args: - - -v={{ .Values.logLevel }} - - -logtostderr - {{- if eq (len $jobNamespaces) 1 }} - - -namespace={{ index $jobNamespaces 0 }} - {{- end }} - - -enable-ui-service={{ .Values.uiService.enable}} - - -ingress-url-format={{ .Values.ingressUrlFormat }} - - -controller-threads={{ .Values.controllerThreads }} - - -resync-interval={{ .Values.resyncInterval }} - - -enable-batch-scheduler={{ .Values.batchScheduler.enable }} - - -label-selector-filter={{ .Values.labelSelectorFilter }} - {{- if .Values.metrics.enable }} - - -enable-metrics=true - - -metrics-labels=app_type - - -metrics-port={{ .Values.metrics.port }} - - -metrics-endpoint={{ .Values.metrics.endpoint }} - - -metrics-prefix={{ .Values.metrics.prefix }} - {{- end }} - {{- if .Values.webhook.enable }} - - -enable-webhook=true - - -webhook-secret-name={{ include "spark-operator.webhookSecretName" . }} - - -webhook-secret-namespace={{ .Release.Namespace }} - - -webhook-svc-name={{ include "spark-operator.webhookServiceName" . }} - - -webhook-svc-namespace={{ .Release.Namespace }} - - -webhook-config-name={{ include "spark-operator.fullname" . }}-webhook-config - - -webhook-port={{ .Values.webhook.port }} - - -webhook-timeout={{ .Values.webhook.timeout }} - - -webhook-namespace-selector={{ .Values.webhook.namespaceSelector }} - - -webhook-object-selector={{ .Values.webhook.objectSelector }} - {{- end }} - - -enable-resource-quota-enforcement={{ .Values.resourceQuotaEnforcement.enable }} - {{- if gt (int .Values.replicaCount) 1 }} - - -leader-election=true - - -leader-election-lock-namespace={{ default .Release.Namespace .Values.leaderElection.lockNamespace }} - - -leader-election-lock-name={{ .Values.leaderElection.lockName }} - {{- end }} - {{- with .Values.resources }} - resources: - {{- toYaml . 
| nindent 10 }} - {{- end }} - {{- with .Values.volumeMounts }} - volumeMounts: - {{- toYaml . | nindent 10 }} - {{- end }} - {{- with .Values.sidecars }} - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.volumes }} - volumes: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.priorityClassName }} - priorityClassName: {{ .Values.priorityClassName }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/charts/spark-operator-chart/templates/poddisruptionbudget.yaml b/charts/spark-operator-chart/templates/poddisruptionbudget.yaml deleted file mode 100644 index 317f8bdb9..000000000 --- a/charts/spark-operator-chart/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if $.Values.podDisruptionBudget.enable }} -{{- if (gt (int $.Values.replicaCount) 1) }} -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: {{ include "spark-operator.fullname" . }}-pdb - labels: - {{- include "spark-operator.labels" . | nindent 4 }} -spec: - selector: - matchLabels: - {{- include "spark-operator.selectorLabels" . | nindent 6 }} - minAvailable: {{ $.Values.podDisruptionBudget.minAvailable }} -{{- else }} -{{- fail "replicaCount must be greater than 1 to enable PodDisruptionBudget" }} -{{- end }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/prometheus-podmonitor.yaml b/charts/spark-operator-chart/templates/prometheus-podmonitor.yaml deleted file mode 100644 index eec380d74..000000000 --- a/charts/spark-operator-chart/templates/prometheus-podmonitor.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{ if and .Values.metrics.enable .Values.podMonitor.enable }} -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: {{ include "spark-operator.name" . -}}-podmonitor - labels: {{ toYaml .Values.podMonitor.labels | nindent 4 }} -spec: - podMetricsEndpoints: - - interval: {{ .Values.podMonitor.podMetricsEndpoint.interval }} - port: {{ .Values.metrics.portName | quote }} - scheme: {{ .Values.podMonitor.podMetricsEndpoint.scheme }} - jobLabel: {{ .Values.podMonitor.jobLabel }} - namespaceSelector: - matchNames: - - {{ .Release.Namespace }} - selector: - matchLabels: - {{- include "spark-operator.selectorLabels" . | nindent 6 }} -{{ end }} \ No newline at end of file diff --git a/charts/spark-operator-chart/templates/prometheus/_helpers.tpl b/charts/spark-operator-chart/templates/prometheus/_helpers.tpl new file mode 100644 index 000000000..d767419b5 --- /dev/null +++ b/charts/spark-operator-chart/templates/prometheus/_helpers.tpl @@ -0,0 +1,22 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Create the name of pod monitor +*/}} +{{- define "spark-operator.prometheus.podMonitorName" -}} +{{- include "spark-operator.fullname" . 
}}-podmonitor +{{- end -}} diff --git a/charts/spark-operator-chart/templates/prometheus/podmonitor.yaml b/charts/spark-operator-chart/templates/prometheus/podmonitor.yaml new file mode 100644 index 000000000..a9c5289c0 --- /dev/null +++ b/charts/spark-operator-chart/templates/prometheus/podmonitor.yaml @@ -0,0 +1,44 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.prometheus.podMonitor.create -}} +{{- if not .Values.prometheus.metrics.enable }} +{{- fail "`metrics.enable` must be set to true when `podMonitor.create` is true." }} +{{- end }} +{{- if not (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1/PodMonitor") }} +{{- fail "The cluster does not support the required API version `monitoring.coreos.com/v1` for `PodMonitor`." }} +{{- end }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "spark-operator.prometheus.podMonitorName" . }} + {{- with .Values.prometheus.podMonitor.labels }} + labels: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - interval: {{ .Values.prometheus.podMonitor.podMetricsEndpoint.interval }} + port: {{ .Values.prometheus.metrics.portName | quote }} + scheme: {{ .Values.prometheus.podMonitor.podMetricsEndpoint.scheme }} + jobLabel: {{ .Values.prometheus.podMonitor.jobLabel }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "spark-operator.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml deleted file mode 100644 index aa110ff49..000000000 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ /dev/null @@ -1,148 +0,0 @@ -{{- if or .Values.rbac.create .Values.rbac.createClusterRole -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ include "spark-operator.fullname" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} - {{- with .Values.rbac.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -rules: -- apiGroups: - - "" - resources: - - pods - - persistentvolumeclaims - verbs: - - "*" -- apiGroups: - - "" - resources: - - services - - configmaps - - secrets - verbs: - - create - - get - - delete - - update - - patch -- apiGroups: - - extensions - - networking.k8s.io - resources: - - ingresses - verbs: - - create - - get - - delete -- apiGroups: - - "" - resources: - - nodes - verbs: - - get -- apiGroups: - - "" - resources: - - events - verbs: - - create - - update - - patch -- apiGroups: - - "" - resources: - - resourcequotas - verbs: - - get - - list - - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - create - - get - - update - - delete -- apiGroups: - - sparkoperator.k8s.io - resources: - - sparkapplications - - sparkapplications/status - - sparkapplications/finalizers - - scheduledsparkapplications - - scheduledsparkapplications/status - - scheduledsparkapplications/finalizers - verbs: - - "*" - {{- if .Values.batchScheduler.enable }} - # required for the `volcano` batch scheduler -- apiGroups: - - scheduling.incubator.k8s.io - - scheduling.sigs.dev - - scheduling.volcano.sh - resources: - - podgroups - verbs: - - "*" - {{- end }} - {{ if .Values.webhook.enable }} -- apiGroups: - - batch - resources: - - jobs - verbs: - - delete - {{- end }} - {{- if gt (int .Values.replicaCount) 1 }} -- apiGroups: - - coordination.k8s.io - resources: - - leases - resourceNames: - - {{ .Values.leaderElection.lockName }} - verbs: - - get - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - {{- end }} ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ include "spark-operator.fullname" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} - {{- with .Values.rbac.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -subjects: - - kind: ServiceAccount - name: {{ include "spark-operator.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ include "spark-operator.fullname" . }} - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/charts/spark-operator-chart/templates/serviceaccount.yaml b/charts/spark-operator-chart/templates/serviceaccount.yaml deleted file mode 100644 index a75f23190..000000000 --- a/charts/spark-operator-chart/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccounts.sparkoperator.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "spark-operator.serviceAccountName" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} - {{- with .Values.serviceAccounts.sparkoperator.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/spark-rbac.yaml b/charts/spark-operator-chart/templates/spark-rbac.yaml deleted file mode 100644 index bbf9da620..000000000 --- a/charts/spark-operator-chart/templates/spark-rbac.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if or .Values.rbac.create .Values.rbac.createRole }} -{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }} -{{- range $jobNamespace := $jobNamespaces }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: spark-role - namespace: {{ $jobNamespace }} - labels: - {{- include "spark-operator.labels" $ | nindent 4 }} -rules: -- apiGroups: - - "" - resources: - - pods - - services - - configmaps - - persistentvolumeclaims - verbs: - - "*" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: spark - namespace: {{ $jobNamespace }} - labels: - {{- include "spark-operator.labels" $ | nindent 4 }} -subjects: -- kind: ServiceAccount - name: {{ include "spark.serviceAccountName" $ }} - namespace: {{ $jobNamespace }} -roleRef: - kind: Role - name: spark-role - apiGroup: rbac.authorization.k8s.io -{{- end }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml b/charts/spark-operator-chart/templates/spark-serviceaccount.yaml deleted file mode 100644 index af8e8d7f9..000000000 --- a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.serviceAccounts.spark.create }} -{{- range $sparkJobNamespace := .Values.sparkJobNamespaces | default (list .Release.Namespace) }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "spark.serviceAccountName" $ }} - namespace: {{ $sparkJobNamespace }} - {{- with $.Values.serviceAccounts.spark.annotations }} - annotations: {{ toYaml . | nindent 4 }} - {{- end }} - labels: {{ include "spark-operator.labels" $ | nindent 4 }} -{{- end }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/spark/_helpers.tpl b/charts/spark-operator-chart/templates/spark/_helpers.tpl new file mode 100644 index 000000000..150ae966f --- /dev/null +++ b/charts/spark-operator-chart/templates/spark/_helpers.tpl @@ -0,0 +1,47 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Create the name of spark component +*/}} +{{- define "spark-operator.spark.name" -}} +{{- include "spark-operator.fullname" . }}-spark +{{- end -}} + +{{/* +Create the name of the service account to be used by spark applications +*/}} +{{- define "spark-operator.spark.serviceAccountName" -}} +{{- if .Values.spark.serviceAccount.create -}} +{{- .Values.spark.serviceAccount.name | default (include "spark-operator.spark.name" .) 
-}} +{{- else -}} +{{- .Values.spark.serviceAccount.name | default "default" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the role to be used by spark service account +*/}} +{{- define "spark-operator.spark.roleName" -}} +{{- include "spark-operator.spark.name" . }} +{{- end -}} + +{{/* +Create the name of the role binding to be used by spark service account +*/}} +{{- define "spark-operator.spark.roleBindingName" -}} +{{- include "spark-operator.spark.name" . }} +{{- end -}} diff --git a/charts/spark-operator-chart/templates/spark/rbac.yaml b/charts/spark-operator-chart/templates/spark/rbac.yaml new file mode 100644 index 000000000..e850b1e50 --- /dev/null +++ b/charts/spark-operator-chart/templates/spark/rbac.yaml @@ -0,0 +1,73 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.spark.rbac.create -}} +{{- range $jobNamespace := .Values.spark.jobNamespaces | default list }} +{{- if $jobNamespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "spark-operator.spark.roleName" $ }} + namespace: {{ $jobNamespace }} + labels: + {{- include "spark-operator.labels" $ | nindent 4 }} + {{- with $.Values.spark.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +rules: +- apiGroups: + - "" + resources: + - pods + - configmaps + - persistentvolumeclaims + - services + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "spark-operator.spark.roleBindingName" $ }} + namespace: {{ $jobNamespace }} + labels: + {{- include "spark-operator.labels" $ | nindent 4 }} + {{- with $.Values.spark.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "spark-operator.spark.serviceAccountName" $ }} + namespace: {{ $jobNamespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "spark-operator.spark.roleName" $ }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/spark/serviceaccount.yaml b/charts/spark-operator-chart/templates/spark/serviceaccount.yaml new file mode 100644 index 000000000..f05d8fae3 --- /dev/null +++ b/charts/spark-operator-chart/templates/spark/serviceaccount.yaml @@ -0,0 +1,30 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
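Note: the Role and RoleBinding in spark/rbac.yaml above are stamped out once per entry in `spark.jobNamespaces`, so Spark pods receive namespace-scoped permissions rather than a cluster-wide grant. A minimal illustrative values fragment (the namespace names are examples, not chart defaults):

spark:
  jobNamespaces:
    - spark-jobs
    - analytics
  serviceAccount:
    create: true
  rbac:
    create: true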
+*/}} + +{{- if .Values.spark.serviceAccount.create }} +{{- range $sparkJobNamespace := .Values.spark.jobNamespaces | default list }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "spark-operator.spark.serviceAccountName" $ }} + namespace: {{ $sparkJobNamespace }} + labels: {{ include "spark-operator.labels" $ | nindent 4 }} + {{- with $.Values.spark.serviceAccount.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/_helpers.tpl b/charts/spark-operator-chart/templates/webhook/_helpers.tpl index 960001129..71588123b 100644 --- a/charts/spark-operator-chart/templates/webhook/_helpers.tpl +++ b/charts/spark-operator-chart/templates/webhook/_helpers.tpl @@ -1,14 +1,113 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Create the name of webhook component +*/}} +{{- define "spark-operator.webhook.name" -}} +{{- include "spark-operator.fullname" . }}-webhook +{{- end -}} + +{{/* +Common labels for the webhook +*/}} +{{- define "spark-operator.webhook.labels" -}} +{{ include "spark-operator.labels" . }} +app.kubernetes.io/component: webhook +{{- end -}} + +{{/* +Selector labels for the webhook +*/}} +{{- define "spark-operator.webhook.selectorLabels" -}} +{{ include "spark-operator.selectorLabels" . }} +app.kubernetes.io/component: webhook +{{- end -}} + +{{/* +Create the name of service account to be used by webhook +*/}} +{{- define "spark-operator.webhook.serviceAccountName" -}} +{{- if .Values.webhook.serviceAccount.create -}} +{{ .Values.webhook.serviceAccount.name | default (include "spark-operator.webhook.name" .) }} +{{- else -}} +{{ .Values.webhook.serviceAccount.name | default "default" }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the role to be used by webhook +*/}} +{{- define "spark-operator.webhook.roleName" -}} +{{- include "spark-operator.webhook.name" . }} +{{- end -}} + +{{/* +Create the name of the role binding to be used by webhook +*/}} +{{- define "spark-operator.webhook.roleBindingName" -}} +{{- include "spark-operator.webhook.name" . }} +{{- end -}} + {{/* Create the name of the secret to be used by webhook */}} -{{- define "spark-operator.webhookSecretName" -}} -{{ include "spark-operator.fullname" . }}-webhook-certs +{{- define "spark-operator.webhook.secretName" -}} +{{ include "spark-operator.webhook.name" . }}-certs {{- end -}} {{/* Create the name of the service to be used by webhook */}} -{{- define "spark-operator.webhookServiceName" -}} -{{ include "spark-operator.fullname" . }}-webhook-svc +{{- define "spark-operator.webhook.serviceName" -}} +{{ include "spark-operator.webhook.name" . 
}}-svc +{{- end -}} + +{{/* +Create the name of mutating webhook configuration +*/}} +{{- define "spark-operator.mutatingWebhookConfigurationName" -}} +webhook.sparkoperator.k8s.io +{{- end -}} + +{{/* +Create the name of validating webhook configuration +*/}} +{{- define "spark-operator.validatingWebhookConfigurationName" -}} +quotaenforcer.sparkoperator.k8s.io +{{- end -}} + +{{/* +Create the name of the deployment to be used by webhook +*/}} +{{- define "spark-operator.webhook.deploymentName" -}} +{{ include "spark-operator.webhook.name" . }} +{{- end -}} + +{{/* +Create the name of the lease resource to be used by leader election +*/}} +{{- define "spark-operator.webhook.leaderElectionName" -}} +{{ include "spark-operator.webhook.name" . }}-lock +{{- end -}} + +{{/* +Create the name of the pod disruption budget to be used by webhook +*/}} +{{- define "spark-operator.webhook.podDisruptionBudgetName" -}} +{{ include "spark-operator.webhook.name" . }}-pdb {{- end -}} diff --git a/charts/spark-operator-chart/templates/webhook/deployment.yaml b/charts/spark-operator-chart/templates/webhook/deployment.yaml new file mode 100644 index 000000000..89b07e3df --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/deployment.yaml @@ -0,0 +1,155 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "spark-operator.webhook.deploymentName" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} +spec: + {{- with .Values.webhook.replicas }} + replicas: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "spark-operator.webhook.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "spark-operator.webhook.selectorLabels" . | nindent 8 }} + {{- with .Values.webhook.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + containers: + - name: spark-operator-webhook + image: {{ include "spark-operator.image" . }} + {{- with .Values.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + args: + - webhook + - start + {{- with .Values.webhook.logLevel }} + - --zap-log-level={{ . }} + {{- end }} + {{- with .Values.spark.jobNamespaces }} + - --namespaces={{ . | join "," }} + {{- end }} + - --webhook-secret-name={{ include "spark-operator.webhook.secretName" . }} + - --webhook-secret-namespace={{ .Release.Namespace }} + - --webhook-svc-name={{ include "spark-operator.webhook.serviceName" . }} + - --webhook-svc-namespace={{ .Release.Namespace }} + - --webhook-port={{ .Values.webhook.port }} + - --mutating-webhook-name={{ include "spark-operator.webhook.name" . }} + - --validating-webhook-name={{ include "spark-operator.webhook.name" . 
}} + {{- with .Values.webhook.resourceQuotaEnforcement.enable }} + - --enable-resource-quota-enforcement=true + {{- end }} + {{- if .Values.prometheus.metrics.enable }} + - --enable-metrics=true + - --metrics-bind-address=:{{ .Values.prometheus.metrics.port }} + - --metrics-endpoint={{ .Values.prometheus.metrics.endpoint }} + - --metrics-prefix={{ .Values.prometheus.metrics.prefix }} + - --metrics-labels=app_type + {{- end }} + - --leader-election=true + - --leader-election-lock-name={{ include "spark-operator.webhook.leaderElectionName" . }} + - --leader-election-lock-namespace={{ .Release.Namespace }} + ports: + - name: {{ .Values.webhook.portName | quote }} + containerPort: {{ .Values.webhook.port }} + {{- if .Values.prometheus.metrics.enable }} + - name: {{ .Values.prometheus.metrics.portName | quote }} + containerPort: {{ .Values.prometheus.metrics.port }} + {{- end }} + {{- with .Values.webhook.env }} + env: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.envFrom }} + envFrom: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.webhook.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{- end }} + livenessProbe: + httpGet: + port: 8081 + scheme: HTTP + path: /healthz + readinessProbe: + httpGet: + port: 8081 + scheme: HTTP + path: /readyz + {{- with .Values.webhook.securityContext }} + securityContext: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.webhook.sidecars }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + serviceAccountName: {{ include "spark-operator.webhook.serviceAccountName" . }} + {{- with .Values.webhook.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.webhook.topologySpreadConstraints }} + {{- if le (int .Values.webhook.replicas) 1 }} + {{- fail "webhook.replicas must be greater than 1 to enable topology spread constraints for webhook pods"}} + {{- end }} + {{- $selectorLabels := include "spark-operator.webhook.selectorLabels" . | fromYaml }} + {{- $labelSelectorDict := dict "labelSelector" ( dict "matchLabels" $selectorLabels ) }} + topologySpreadConstraints: + {{- range .Values.webhook.topologySpreadConstraints }} + - {{ mergeOverwrite . $labelSelectorDict | toYaml | nindent 8 | trim }} + {{- end }} + {{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/mutatingwebhookconfiguration.yaml b/charts/spark-operator-chart/templates/webhook/mutatingwebhookconfiguration.yaml new file mode 100644 index 000000000..f48a04320 --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/mutatingwebhookconfiguration.yaml @@ -0,0 +1,116 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
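Note: the webhook deployment above fails fast when `webhook.topologySpreadConstraints` is set while `webhook.replicas` is not greater than 1, since spreading a single replica is a no-op; the chart merges its webhook selector labels into each constraint automatically. An illustrative override that satisfies the guard (values are examples, not defaults):

webhook:
  replicas: 2
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway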
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} +webhooks: +- name: mutate--v1-pod.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /mutate--v1-pod + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + objectSelector: + matchLabels: + sparkoperator.k8s.io/launched-by-spark-operator: "true" + rules: + - apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + operations: ["CREATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . }} + {{- end }} +- name: mutate-sparkoperator-k8s-io-v1beta2-sparkapplication.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /mutate-sparkoperator-k8s-io-v1beta2-sparkapplication + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + rules: + - apiGroups: ["sparkoperator.k8s.io"] + apiVersions: ["v1beta2"] + resources: ["sparkapplications"] + operations: ["CREATE", "UPDATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . }} + {{- end }} +- name: mutate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /mutate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + rules: + - apiGroups: ["sparkoperator.k8s.io"] + apiVersions: ["v1beta2"] + resources: ["scheduledsparkapplications"] + operations: ["CREATE", "UPDATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . 
}} + {{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/poddisruptionbudget.yaml b/charts/spark-operator-chart/templates/webhook/poddisruptionbudget.yaml new file mode 100644 index 000000000..6de7e6ef5 --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/poddisruptionbudget.yaml @@ -0,0 +1,34 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.webhook.podDisruptionBudget.enable }} +{{- if le (int .Values.webhook.replicas) 1 }} +{{- fail "webhook.replicas must be greater than 1 to enable pod disruption budget for webhook" }} +{{- end -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "spark-operator.webhook.podDisruptionBudgetName" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "spark-operator.webhook.selectorLabels" . | nindent 6 }} + {{- with .Values.webhook.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} + {{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/rbac.yaml b/charts/spark-operator-chart/templates/webhook/rbac.yaml new file mode 100644 index 000000000..b1c5d426f --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/rbac.yaml @@ -0,0 +1,171 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.webhook.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - resourcequotas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - update + - patch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + resourceNames: + - {{ include "spark-operator.webhook.name" . 
}} + verbs: + - get + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + - sparkapplications/status + - sparkapplications/finalizers + - scheduledsparkapplications + - scheduledsparkapplications/status + - scheduledsparkapplications/finalizers + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "spark-operator.webhook.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "spark-operator.webhook.name" . }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create +- apiGroups: + - "" + resources: + - secrets + resourceNames: + - {{ include "spark-operator.webhook.secretName" . }} + verbs: + - get + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ include "spark-operator.webhook.leaderElectionName" . }} + verbs: + - get + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "spark-operator.webhook.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "spark-operator.webhook.name" . }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/secret.yaml b/charts/spark-operator-chart/templates/webhook/secret.yaml deleted file mode 100644 index 672738f2c..000000000 --- a/charts/spark-operator-chart/templates/webhook/secret.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.webhook.enable -}} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "spark-operator.webhookSecretName" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} -data: - ca-key.pem: "" - ca-cert.pem: "" - server-key.pem: "" - server-cert.pem: "" -{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/service.yaml b/charts/spark-operator-chart/templates/webhook/service.yaml index e31f8236b..45064a807 100644 --- a/charts/spark-operator-chart/templates/webhook/service.yaml +++ b/charts/spark-operator-chart/templates/webhook/service.yaml @@ -1,15 +1,29 @@ -{{- if .Values.webhook.enable -}} +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
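Note: the placeholder certificate Secret is removed because the webhook now provisions its TLS material at runtime, which is what the `create` plus name-scoped `get`/`update` grants on secrets in the Role above enable. The Service below likewise switches from a hard-coded port 443 to `webhook.port`; an illustrative override (9443 is an example value, not necessarily the chart default):

webhook:
  port: 9443
  portName: webhook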
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + apiVersion: v1 kind: Service metadata: - name: {{ include "spark-operator.webhookServiceName" . }} + name: {{ include "spark-operator.webhook.serviceName" . }} labels: - {{- include "spark-operator.labels" . | nindent 4 }} + {{- include "spark-operator.webhook.labels" . | nindent 4 }} spec: selector: - {{- include "spark-operator.selectorLabels" . | nindent 4 }} + {{- include "spark-operator.webhook.selectorLabels" . | nindent 4 }} ports: - - port: 443 + - port: {{ .Values.webhook.port }} targetPort: {{ .Values.webhook.portName | quote }} name: {{ .Values.webhook.portName }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/serviceaccount.yaml b/charts/spark-operator-chart/templates/webhook/serviceaccount.yaml new file mode 100644 index 000000000..77944b83c --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/serviceaccount.yaml @@ -0,0 +1,28 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.webhook.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "spark-operator.webhook.serviceAccountName" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/validatingwebhookconfiguration.yaml b/charts/spark-operator-chart/templates/webhook/validatingwebhookconfiguration.yaml new file mode 100644 index 000000000..3fbf55184 --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/validatingwebhookconfiguration.yaml @@ -0,0 +1,83 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . 
| nindent 4 }} +webhooks: +- name: validate-sparkoperator-k8s-io-v1beta2-sparkapplication.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /validate-sparkoperator-k8s-io-v1beta2-sparkapplication + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + rules: + - apiGroups: ["sparkoperator.k8s.io"] + apiVersions: ["v1beta2"] + resources: ["sparkapplications"] + operations: ["CREATE", "UPDATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . }} + {{- end }} +- name: validate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /validate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + rules: + - apiGroups: ["sparkoperator.k8s.io"] + apiVersions: ["v1beta2"] + resources: ["scheduledsparkapplications"] + operations: ["CREATE", "UPDATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . }} + {{- end }} diff --git a/charts/spark-operator-chart/tests/controller/deployment_test.yaml b/charts/spark-operator-chart/tests/controller/deployment_test.yaml new file mode 100644 index 000000000..e4b6983a7 --- /dev/null +++ b/charts/spark-operator-chart/tests/controller/deployment_test.yaml @@ -0,0 +1,537 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
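# Note: in both webhook configurations above, a non-empty `spark.jobNamespaces`
# restricts admission to those namespaces via a `kubernetes.io/metadata.name`
# namespaceSelector, while an empty list admits matching objects from every
# namespace. An illustrative values fragment (the namespace name is an example):
#
#   spark:
#     jobNamespaces:
#       - spark-jobs
#   webhook:
#     failurePolicy: Fail
#     timeoutSeconds: 10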
+# + +suite: Test controller deployment + +templates: + - controller/deployment.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should use the specified image repository if `image.registry`, `image.repository` and `image.tag` are set + set: + image: + registry: test-registry + repository: test-repository + tag: test-tag + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: test-registry/test-repository:test-tag + + - it: Should use the specified image pull policy if `image.pullPolicy` is set + set: + image: + pullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[*].imagePullPolicy + value: Always + + - it: Should set replicas if `controller.replicas` is set + set: + controller: + replicas: 10 + asserts: + - equal: + path: spec.replicas + value: 10 + + - it: Should add pod labels if `controller.labels` is set + set: + controller: + labels: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.labels.key1 + value: value1 + - equal: + path: spec.template.metadata.labels.key2 + value: value2 + + - it: Should add prometheus annotations if `metrics.enable` is true + set: + prometheus: + metrics: + enable: true + port: 10254 + endpoint: /metrics + asserts: + - equal: + path: spec.template.metadata.annotations["prometheus.io/scrape"] + value: "true" + - equal: + path: spec.template.metadata.annotations["prometheus.io/port"] + value: "10254" + - equal: + path: spec.template.metadata.annotations["prometheus.io/path"] + value: /metrics + + - it: Should add pod annotations if `controller.annotations` is set + set: + controller: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.annotations.key1 + value: value1 + - equal: + path: spec.template.metadata.annotations.key2 + value: value2 + + - it: Should contain `--zap-log-level` arg if `controller.logLevel` is set + set: + controller: + logLevel: debug + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --zap-log-level=debug + + - it: Should contain `--namespaces` arg if `spark.jobNamespaces` is set + set: + spark.jobNamespaces: + - ns1 + - ns2 + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --namespaces=ns1,ns2 + + - it: Should contain `--controller-threads` arg if `controller.workers` is set + set: + controller: + workers: 30 + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --controller-threads=30 + + - it: Should contain `--enable-ui-service` arg if `controller.uiService.enable` is set to `true` + set: + controller: + uiService: + enable: true + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --enable-ui-service=true + + - it: Should contain `--ingress-url-format` arg if `controller.uiIngress.enable` is set to `true` and `controller.uiIngress.urlFormat` is set + set: + controller: + uiService: + enable: true + uiIngress: + enable: true + urlFormat: "{{$appName}}.example.com/{{$appNamespace}}/{{$appName}}" + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --ingress-url-format={{$appName}}.example.com/{{$appNamespace}}/{{$appName}} + + - it: Should contain `--enable-batch-scheduler` arg if `controller.batchScheduler.enable` is `true` + set: + controller: 
+ batchScheduler: + enable: true + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --enable-batch-scheduler=true + + - it: Should contain `--enable-metrics` arg if `prometheus.metrics.enable` is set to `true` + set: + prometheus: + metrics: + enable: true + port: 12345 + portName: test-port + endpoint: /test-endpoint + prefix: test-prefix + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --enable-metrics=true + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --metrics-bind-address=:12345 + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --metrics-endpoint=/test-endpoint + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --metrics-prefix=test-prefix + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --metrics-labels=app_type + + - it: Should enable leader election by default + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --leader-election=true + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --leader-election-lock-name=spark-operator-controller-lock + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --leader-election-lock-namespace=spark-operator + + - it: Should add metric ports if `prometheus.metrics.enable` is true + set: + prometheus: + metrics: + enable: true + port: 10254 + portName: metrics + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: metrics + containerPort: 10254 + count: 1 + + - it: Should add environment variables if `controller.env` is set + set: + controller: + env: + - name: ENV_NAME_1 + value: ENV_VALUE_1 + - name: ENV_NAME_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: test-key + optional: false + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ENV_NAME_1 + value: ENV_VALUE_1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ENV_NAME_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: test-key + optional: false + + - it: Should add environment variable sources if `controller.envFrom` is set + set: + controller: + envFrom: + - configMapRef: + name: test-configmap + optional: false + - secretRef: + name: test-secret + optional: false + asserts: + - contains: + path: spec.template.spec.containers[0].envFrom + content: + configMapRef: + name: test-configmap + optional: false + - contains: + path: spec.template.spec.containers[0].envFrom + content: + secretRef: + name: test-secret + optional: false + + - it: Should add volume mounts if `controller.volumeMounts` is set + set: + controller: + volumeMounts: + - name: volume1 + mountPath: /volume1 + - name: volume2 + mountPath: /volume2 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume1 + mountPath: /volume1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume2 + mountPath: /volume2 + + - it: Should add resources if `controller.resources` is set + set: + controller: + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: 
"500m" + asserts: + - equal: + path: spec.template.spec.containers[0].resources + value: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + + - it: Should add container securityContext if `controller.securityContext` is set + set: + controller: + securityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext + value: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + + - it: Should add sidecars if `controller.sidecars` is set + set: + controller: + sidecars: + - name: sidecar1 + image: sidecar-image1 + - name: sidecar2 + image: sidecar-image2 + asserts: + - contains: + path: spec.template.spec.containers + content: + name: sidecar1 + image: sidecar-image1 + - contains: + path: spec.template.spec.containers + content: + name: sidecar2 + image: sidecar-image2 + + - it: Should add secrets if `image.pullSecrets` is set + set: + image: + pullSecrets: + - name: test-secret1 + - name: test-secret2 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: test-secret1 + - equal: + path: spec.template.spec.imagePullSecrets[1].name + value: test-secret2 + + - it: Should add volumes if `controller.volumes` is set + set: + controller: + volumes: + - name: volume1 + emptyDir: {} + - name: volume2 + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: volume1 + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: volume2 + emptyDir: {} + count: 1 + + - it: Should add nodeSelector if `controller.nodeSelector` is set + set: + controller: + nodeSelector: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.spec.nodeSelector.key1 + value: value1 + - equal: + path: spec.template.spec.nodeSelector.key2 + value: value2 + + - it: Should add affinity if `controller.affinity` is set + set: + controller: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + asserts: + - equal: + path: spec.template.spec.affinity + value: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + + - it: Should add tolerations if `controller.tolerations` is set + set: + controller: + tolerations: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + asserts: + - equal: + path: spec.template.spec.tolerations + value: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + + - it: Should add priorityClassName if `controller.priorityClassName` is set + set: + controller: + priorityClassName: test-priority-class + asserts: + - equal: + path: spec.template.spec.priorityClassName + value: test-priority-class + + - it: Should add pod securityContext if 
`controller.podSecurityContext` is set + set: + controller: + podSecurityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.securityContext + value: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + + - it: Should not contain topologySpreadConstraints if `controller.topologySpreadConstraints` is not set + set: + controller: + topologySpreadConstraints: [] + asserts: + - notExists: + path: spec.template.spec.topologySpreadConstraints + + - it: Should add topologySpreadConstraints if `controller.topologySpreadConstraints` is set and `controller.replicas` is greater than 1 + set: + controller: + replicas: 2 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + asserts: + - equal: + path: spec.template.spec.topologySpreadConstraints + value: + - labelSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: spark-operator + app.kubernetes.io/name: spark-operator + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: spark-operator + app.kubernetes.io/name: spark-operator + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + + - it: Should fail if `controller.topologySpreadConstraints` is set and `controller.replicas` is not greater than 1 + set: + controller: + replicas: 1 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + asserts: + - failedTemplate: + errorMessage: "controller.replicas must be greater than 1 to enable topology spread constraints for controller pods" diff --git a/charts/spark-operator-chart/tests/controller/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/controller/poddisruptionbudget_test.yaml new file mode 100644 index 000000000..dd3a47bc5 --- /dev/null +++ b/charts/spark-operator-chart/tests/controller/poddisruptionbudget_test.yaml @@ -0,0 +1,68 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
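# Note: the final two cases in the controller deployment suite above pin down the
# replica guard: topology spread constraints are rendered, with the chart's
# selector labels merged in, only when `controller.replicas` is greater than 1.
# An illustrative values fragment that passes the guard:
#
#   controller:
#     replicas: 2
#     topologySpreadConstraints:
#       - maxSkew: 1
#         topologyKey: topology.kubernetes.io/zone
#         whenUnsatisfiable: ScheduleAnyway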
+# + +suite: Test controller pod disruption budget + +templates: + - controller/poddisruptionbudget.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not render podDisruptionBudget if `controller.podDisruptionBudget.enable` is false + set: + controller: + podDisruptionBudget: + enable: false + asserts: + - hasDocuments: + count: 0 + + - it: Should fail if `controller.replicas` is less than 2 when `controller.podDisruptionBudget.enable` is true + set: + controller: + replicas: 1 + podDisruptionBudget: + enable: true + asserts: + - failedTemplate: + errorMessage: "controller.replicas must be greater than 1 to enable pod disruption budget for controller" + + - it: Should render spark operator podDisruptionBudget if `controller.podDisruptionBudget.enable` is true + set: + controller: + replicas: 2 + podDisruptionBudget: + enable: true + asserts: + - containsDocument: + apiVersion: policy/v1 + kind: PodDisruptionBudget + name: spark-operator-controller-pdb + + - it: Should set minAvailable if `controller.podDisruptionBudget.minAvailable` is specified + set: + controller: + replicas: 2 + podDisruptionBudget: + enable: true + minAvailable: 3 + asserts: + - equal: + path: spec.minAvailable + value: 3 diff --git a/charts/spark-operator-chart/tests/controller/rbac_test.yaml b/charts/spark-operator-chart/tests/controller/rbac_test.yaml new file mode 100644 index 000000000..4a910adcb --- /dev/null +++ b/charts/spark-operator-chart/tests/controller/rbac_test.yaml @@ -0,0 +1,79 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
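# Note: as the pod disruption budget suite above verifies, enabling the budget
# with a single replica is rejected at template time. An illustrative values
# fragment that renders the PDB (`minAvailable: 1` is an example, not the
# chart default):
#
#   controller:
#     replicas: 2
#     podDisruptionBudget:
#       enable: true
#       minAvailable: 1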
+# + +suite: Test controller rbac + +templates: + - controller/rbac.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not create controller RBAC resources if `controller.rbac.create` is false + set: + controller: + rbac: + create: false + asserts: + - hasDocuments: + count: 0 + + - it: Should create controller ClusterRole by default + documentIndex: 0 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + name: spark-operator-controller + + - it: Should create controller ClusterRoleBinding by default + documentIndex: 1 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + name: spark-operator-controller + - contains: + path: subjects + content: + kind: ServiceAccount + name: spark-operator-controller + namespace: spark-operator + count: 1 + - equal: + path: roleRef + value: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: spark-operator-controller + + - it: Should add extra annotations to controller ClusterRole if `controller.rbac.annotations` is set + set: + controller: + rbac: + annotations: + key1: value1 + key2: value2 + documentIndex: 0 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 diff --git a/charts/spark-operator-chart/tests/controller/serviceaccount_test.yaml b/charts/spark-operator-chart/tests/controller/serviceaccount_test.yaml new file mode 100644 index 000000000..4891a9a1b --- /dev/null +++ b/charts/spark-operator-chart/tests/controller/serviceaccount_test.yaml @@ -0,0 +1,67 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +suite: Test controller service account + +templates: + - controller/serviceaccount.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not create controller service account if `controller.serviceAccount.create` is false + set: + controller: + serviceAccount: + create: false + asserts: + - hasDocuments: + count: 0 + + - it: Should create controller service account by default + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark-operator-controller + + - it: Should use the specified service account name if `controller.serviceAccount.name` is set + set: + controller: + serviceAccount: + name: custom-service-account + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: custom-service-account + + - it: Should add extra annotations if `controller.serviceAccount.annotations` is set + set: + controller: + serviceAccount: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 diff --git a/charts/spark-operator-chart/tests/deployment_test.yaml b/charts/spark-operator-chart/tests/deployment_test.yaml deleted file mode 100644 index 5debda193..000000000 --- a/charts/spark-operator-chart/tests/deployment_test.yaml +++ /dev/null @@ -1,301 +0,0 @@ -suite: Test spark operator deployment - -templates: - - deployment.yaml - -release: - name: spark-operator - -tests: - - it: Should contain namespace arg when sparkJobNamespaces is equal to 1 - set: - sparkJobNamespaces: - - ns1 - asserts: - - contains: - path: spec.template.spec.containers[0].args - content: -namespace=ns1 - - - it: Should add pod annotations if podAnnotations is set - set: - podAnnotations: - key1: value1 - key2: value2 - asserts: - - equal: - path: spec.template.metadata.annotations.key1 - value: value1 - - equal: - path: spec.template.metadata.annotations.key2 - value: value2 - - - it: Should add prometheus annotations if metrics.enable is true - set: - metrics: - enable: true - port: 10254 - endpoint: /metrics - asserts: - - equal: - path: spec.template.metadata.annotations["prometheus.io/scrape"] - value: "true" - - equal: - path: spec.template.metadata.annotations["prometheus.io/port"] - value: "10254" - - equal: - path: spec.template.metadata.annotations["prometheus.io/path"] - value: /metrics - - - it: Should add secrets if imagePullSecrets is set - set: - imagePullSecrets: - - name: test-secret1 - - name: test-secret2 - asserts: - - equal: - path: spec.template.spec.imagePullSecrets[0].name - value: test-secret1 - - equal: - path: spec.template.spec.imagePullSecrets[1].name - value: test-secret2 - - - it: Should add pod securityContext if podSecurityContext is set - set: - podSecurityContext: - runAsUser: 1000 - runAsGroup: 2000 - fsGroup: 3000 - asserts: - - equal: - path: spec.template.spec.securityContext.runAsUser - value: 1000 - - equal: - path: spec.template.spec.securityContext.runAsGroup - value: 2000 - - equal: - path: spec.template.spec.securityContext.fsGroup - value: 3000 - - - it: Should use the specified image repository if image.repository and image.tag is set - set: - image: - repository: test-repository - tag: test-tag - asserts: - - equal: - path: spec.template.spec.containers[0].image - value: test-repository:test-tag - - - it: Should use the specified image pull policy if image.pullPolicy is set - set: - image: - pullPolicy: Always - asserts: - - equal: - path: 
spec.template.spec.containers[0].imagePullPolicy - value: Always - - - it: Should add container securityContext if securityContext is set - set: - securityContext: - runAsUser: 1000 - runAsGroup: 2000 - fsGroup: 3000 - asserts: - - equal: - path: spec.template.spec.containers[0].securityContext.runAsUser - value: 1000 - - equal: - path: spec.template.spec.containers[0].securityContext.runAsGroup - value: 2000 - - equal: - path: spec.template.spec.containers[0].securityContext.fsGroup - value: 3000 - - - it: Should add metric ports if metrics.enable is true - set: - metrics: - enable: true - port: 10254 - portName: metrics - asserts: - - contains: - path: spec.template.spec.containers[0].ports - content: - name: metrics - containerPort: 10254 - count: 1 - - - it: Should add webhook ports if webhook.enable is true - set: - webhook: - enable: true - port: 8080 - portName: webhook - asserts: - - contains: - path: spec.template.spec.containers[0].ports - content: - name: webhook - containerPort: 8080 - count: 1 - - - it: Should add resources if resources is set - set: - resources: - requests: - memory: "64Mi" - cpu: "250m" - limits: - memory: "128Mi" - cpu: "500m" - asserts: - - equal: - path: spec.template.spec.containers[0].resources - value: - requests: - memory: "64Mi" - cpu: "250m" - limits: - memory: "128Mi" - cpu: "500m" - - - it: Should add sidecars if sidecars is set - set: - sidecars: - - name: sidecar1 - image: sidecar-image1 - - name: sidecar2 - image: sidecar-image2 - asserts: - - contains: - path: spec.template.spec.containers - content: - name: sidecar1 - image: sidecar-image1 - count: 1 - - contains: - path: spec.template.spec.containers - content: - name: sidecar2 - image: sidecar-image2 - count: 1 - - - it: Should add volumes if volumes is set - set: - volumes: - - name: volume1 - emptyDir: {} - - name: volume2 - emptyDir: {} - asserts: - - contains: - path: spec.template.spec.volumes - content: - name: volume1 - emptyDir: {} - count: 1 - - contains: - path: spec.template.spec.volumes - content: - name: volume2 - emptyDir: {} - count: 1 - - - it: Should add volume mounts if volumeMounts is set - set: - volumeMounts: - - name: volume1 - mountPath: /volume1 - - name: volume2 - mountPath: /volume2 - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - name: volume1 - mountPath: /volume1 - count: 1 - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - name: volume2 - mountPath: /volume2 - count: 1 - - - it: Should add nodeSelector if nodeSelector is set - set: - nodeSelector: - key1: value1 - key2: value2 - asserts: - - equal: - path: spec.template.spec.nodeSelector.key1 - value: value1 - - equal: - path: spec.template.spec.nodeSelector.key2 - value: value2 - - - it: Should add affinity if affinity is set - set: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - antarctica-east1 - - antarctica-west1 - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: another-node-label-key - operator: In - values: - - another-node-label-value - asserts: - - equal: - path: spec.template.spec.affinity - value: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - antarctica-east1 - - antarctica-west1 - 
preferredDuringSchedulingIgnoredDuringExecution:
-              - weight: 1
-                preference:
-                  matchExpressions:
-                    - key: another-node-label-key
-                      operator: In
-                      values:
-                        - another-node-label-value
-
-  - it: Should add tolerations if tolerations is set
-    set:
-      tolerations:
-        - key: key1
-          operator: Equal
-          value: value1
-          effect: NoSchedule
-        - key: key2
-          operator: Exists
-          effect: NoSchedule
-    asserts:
-      - equal:
-          path: spec.template.spec.tolerations
-          value:
-            - key: key1
-              operator: Equal
-              value: value1
-              effect: NoSchedule
-            - key: key2
-              operator: Exists
-              effect: NoSchedule
diff --git a/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml
deleted file mode 100644
index 3f702fd10..000000000
--- a/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-suite: Test spark operator podDisruptionBudget
-
-templates:
-  - poddisruptionbudget.yaml
-
-release:
-  name: spark-operator
-
-tests:
-  - it: Should not render spark operator podDisruptionBudget if podDisruptionBudget.enable is false
-    set:
-      podDisruptionBudget:
-        enable: false
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should render spark operator podDisruptionBudget if podDisruptionBudget.enable is true
-    set:
-      podDisruptionBudget:
-        enable: true
-    documentIndex: 0
-    asserts:
-      - containsDocument:
-          apiVersion: policy/v1
-          kind: PodDisruptionBudget
-          name: spark-operator-podDisruptionBudget
-
-  - it: Should set minAvailable from values
-    set:
-      podDisruptionBudget:
-        enable: true
-        minAvailable: 3
-    asserts:
-      - equal:
-          path: spec.template.minAvailable
-          value: 3
diff --git a/charts/spark-operator-chart/tests/prometheus/podmonitor_test.yaml b/charts/spark-operator-chart/tests/prometheus/podmonitor_test.yaml
new file mode 100644
index 000000000..7e8bc54aa
--- /dev/null
+++ b/charts/spark-operator-chart/tests/prometheus/podmonitor_test.yaml
@@ -0,0 +1,102 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+suite: Test prometheus pod monitor
+
+templates:
+  - prometheus/podmonitor.yaml
+
+release:
+  name: spark-operator
+  namespace: spark-operator
+
+tests:
+  - it: Should not create pod monitor by default
+    asserts:
+      - hasDocuments:
+          count: 0
+
+  - it: Should fail if `prometheus.podMonitor.create` is true and `prometheus.metrics.enable` is false
+    set:
+      prometheus:
+        metrics:
+          enable: false
+        podMonitor:
+          create: true
+    asserts:
+      - failedTemplate:
+          errorMessage: "`metrics.enable` must be set to true when `podMonitor.create` is true."
+
+  - it: Should fail if the cluster does not support `monitoring.coreos.com/v1/PodMonitor` even if `prometheus.podMonitor.create` and `prometheus.metrics.enable` are both true
+    set:
+      prometheus:
+        metrics:
+          enable: true
+        podMonitor:
+          create: true
+    asserts:
+      - failedTemplate:
+          errorMessage: "The cluster does not support the required API version `monitoring.coreos.com/v1` for `PodMonitor`."
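+
+  # For reference, a minimal values sketch (not part of the upstream test
+  # suite) that exercises the happy path on a real cluster, assuming the
+  # Prometheus operator CRDs are installed so that `monitoring.coreos.com/v1`
+  # is served:
+  #
+  #   prometheus:
+  #     metrics:
+  #       enable: true
+  #     podMonitor:
+  #       create: true
+  #
+  # helm unittest runs without a cluster, so the tests below fake this
+  # capability via `capabilities.apiVersions` instead.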
+
+  - it: Should create pod monitor if the cluster supports `monitoring.coreos.com/v1/PodMonitor` and `prometheus.podMonitor.create` and `prometheus.metrics.enable` are both true
+    capabilities:
+      apiVersions:
+        - monitoring.coreos.com/v1/PodMonitor
+    set:
+      prometheus:
+        metrics:
+          enable: true
+        podMonitor:
+          create: true
+    asserts:
+      - containsDocument:
+          apiVersion: monitoring.coreos.com/v1
+          kind: PodMonitor
+          name: spark-operator-podmonitor
+
+  - it: Should use the specified labels, jobLabel and podMetricsEndpoint
+    capabilities:
+      apiVersions:
+        - monitoring.coreos.com/v1/PodMonitor
+    set:
+      prometheus:
+        metrics:
+          enable: true
+          portName: custom-port
+        podMonitor:
+          create: true
+          labels:
+            key1: value1
+            key2: value2
+          jobLabel: custom-job-label
+          podMetricsEndpoint:
+            scheme: https
+            interval: 10s
+    asserts:
+      - equal:
+          path: metadata.labels
+          value:
+            key1: value1
+            key2: value2
+      - equal:
+          path: spec.podMetricsEndpoints[0]
+          value:
+            port: custom-port
+            scheme: https
+            interval: 10s
+      - equal:
+          path: spec.jobLabel
+          value: custom-job-label
diff --git a/charts/spark-operator-chart/tests/rbac_test.yaml b/charts/spark-operator-chart/tests/rbac_test.yaml
deleted file mode 100644
index f411c4def..000000000
--- a/charts/spark-operator-chart/tests/rbac_test.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-suite: Test spark operator rbac
-
-templates:
-  - rbac.yaml
-
-release:
-  name: spark-operator
-
-tests:
-  - it: Should not render spark operator rbac resources if rbac.create is false and rbac.createClusterRole is false
-    set:
-      rbac:
-        create: false
-        createClusterRole: false
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should render spark operator cluster role if rbac.create is true
-    set:
-      rbac:
-        create: true
-    documentIndex: 0
-    asserts:
-      - containsDocument:
-          apiVersion: rbac.authorization.k8s.io/v1
-          kind: ClusterRole
-          name: spark-operator
-
-  - it: Should render spark operator cluster role if rbac.createClusterRole is true
-    set:
-      rbac:
-        createClusterRole: true
-    documentIndex: 0
-    asserts:
-      - containsDocument:
-          apiVersion: rbac.authorization.k8s.io/v1
-          kind: ClusterRole
-          name: spark-operator
-
-  - it: Should render spark operator cluster role binding if rbac.create is true
-    set:
-      rbac:
-        create: true
-    documentIndex: 1
-    asserts:
-      - containsDocument:
-          apiVersion: rbac.authorization.k8s.io/v1
-          kind: ClusterRoleBinding
-          name: spark-operator
-
-  - it: Should render spark operator cluster role binding correctly if rbac.createClusterRole is true
-    set:
-      rbac:
-        createClusterRole: true
-    release:
-    documentIndex: 1
-    asserts:
-      - containsDocument:
-          apiVersion: rbac.authorization.k8s.io/v1
-          kind: ClusterRoleBinding
-          name: spark-operator
-      - contains:
-          path: subjects
-          content:
-            kind: ServiceAccount
-            name: spark-operator
-            namespace: NAMESPACE
-          count: 1
-      - equal:
-          path: roleRef
-          value:
-            kind: ClusterRole
-            name: spark-operator
-            apiGroup: rbac.authorization.k8s.io
-
-  - it: Should add extra annotations to spark operator cluster role if rbac.annotations is set
-    set:
-      rbac:
-        annotations:
-          key1: value1
-          key2: value2
-    documentIndex: 0
-    asserts:
-      - equal:
-          path: metadata.annotations.key1
-          value: value1
-      - equal:
-          path: metadata.annotations.key2
-          value: value2
diff --git a/charts/spark-operator-chart/tests/serviceaccount_test.yaml b/charts/spark-operator-chart/tests/serviceaccount_test.yaml
deleted file mode 100644
index a9a1e39c6..000000000
--- a/charts/spark-operator-chart/tests/serviceaccount_test.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-suite: Test
spark operator service account - -templates: - - serviceaccount.yaml - -release: - name: spark-operator - -tests: - - it: Should not render service account if serviceAccounts.sparkoperator.create is false - set: - serviceAccounts: - sparkoperator: - create: false - asserts: - - hasDocuments: - count: 0 - - - it: Should render service account if serviceAccounts.sparkoperator.create is true - set: - serviceAccounts: - sparkoperator: - create: true - asserts: - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark-operator - - - it: Should use the specified service account name if serviceAccounts.sparkoperator.name is set - set: - serviceAccounts: - sparkoperator: - name: custom-service-account - asserts: - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: custom-service-account - - - it: Should add extra annotations if serviceAccounts.sparkoperator.annotations is set - set: - serviceAccounts: - sparkoperator: - annotations: - key1: value1 - key2: value2 - asserts: - - equal: - path: metadata.annotations.key1 - value: value1 - - equal: - path: metadata.annotations.key2 - value: value2 diff --git a/charts/spark-operator-chart/tests/spark-rbac_test.yaml b/charts/spark-operator-chart/tests/spark-rbac_test.yaml deleted file mode 100644 index 6d194fa3a..000000000 --- a/charts/spark-operator-chart/tests/spark-rbac_test.yaml +++ /dev/null @@ -1,133 +0,0 @@ -suite: Test spark rbac - -templates: - - spark-rbac.yaml - -release: - name: spark-operator - -tests: - - it: Should not render spark rbac resources if rbac.create is false and rbac.createRole is false - set: - rbac: - create: false - createRole: false - asserts: - - hasDocuments: - count: 0 - - - it: Should render spark role if rbac.create is true - set: - rbac: - create: true - documentIndex: 0 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - - - it: Should render spark role if rbac.createRole is true - set: - rbac: - createRole: true - documentIndex: 0 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - - - it: Should render spark role binding if rbac.create is true - set: - rbac: - create: true - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - - - it: Should render spark role binding if rbac.createRole is true - set: - rbac: - createRole: true - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - - - it: Should create a single spark role with namespace "" by default - documentIndex: 0 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - namespace: "" - - - it: Should create a single spark role binding with namespace "" by default - values: - - ../values.yaml - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - namespace: "" - - - it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values - set: - sparkJobNamespaces: - - ns1 - - ns2 - documentIndex: 0 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - namespace: ns1 - - - it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values - set: - sparkJobNamespaces: - - ns1 - - ns2 - documentIndex: 1 - asserts: - - containsDocument: - 
apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - namespace: ns1 - - - it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values - set: - sparkJobNamespaces: - - ns1 - - ns2 - documentIndex: 2 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - namespace: ns2 - - - it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values - set: - sparkJobNamespaces: - - ns1 - - ns2 - documentIndex: 3 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - namespace: ns2 diff --git a/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml b/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml deleted file mode 100644 index f7140f84f..000000000 --- a/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml +++ /dev/null @@ -1,112 +0,0 @@ -suite: Test spark service account - -templates: - - spark-serviceaccount.yaml - -release: - name: spark-operator - -tests: - - it: Should not render service account if serviceAccounts.spark.create is false - set: - serviceAccounts: - spark: - create: false - asserts: - - hasDocuments: - count: 0 - - - it: Should render service account if serviceAccounts.spark.create is true - set: - serviceAccounts: - spark: - create: true - asserts: - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark-operator-spark - - - it: Should use the specified service account name if serviceAccounts.spark.name is set - set: - serviceAccounts: - spark: - name: spark - asserts: - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark - - - it: Should add extra annotations if serviceAccounts.spark.annotations is set - set: - serviceAccounts: - spark: - annotations: - key1: value1 - key2: value2 - asserts: - - equal: - path: metadata.annotations.key1 - value: value1 - - equal: - path: metadata.annotations.key2 - value: value2 - - - it: Should create multiple service accounts if sparkJobNamespaces is set - set: - serviceAccounts: - spark: - name: spark - sparkJobNamespaces: - - ns1 - - ns2 - - ns3 - documentIndex: 0 - asserts: - - hasDocuments: - count: 3 - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark - namespace: ns1 - - - - it: Should create multiple service accounts if sparkJobNamespaces is set - set: - serviceAccounts: - spark: - name: spark - sparkJobNamespaces: - - ns1 - - ns2 - - ns3 - documentIndex: 1 - asserts: - - hasDocuments: - count: 3 - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark - namespace: ns2 - - - it: Should create multiple service accounts if sparkJobNamespaces is set - set: - serviceAccounts: - spark: - name: spark - sparkJobNamespaces: - - ns1 - - ns2 - - ns3 - documentIndex: 2 - asserts: - - hasDocuments: - count: 3 - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark - namespace: ns3 diff --git a/charts/spark-operator-chart/tests/spark/rbac_test.yaml b/charts/spark-operator-chart/tests/spark/rbac_test.yaml new file mode 100644 index 000000000..2de678b54 --- /dev/null +++ b/charts/spark-operator-chart/tests/spark/rbac_test.yaml @@ -0,0 +1,123 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+suite: Test spark rbac
+
+templates:
+  - spark/rbac.yaml
+
+release:
+  name: spark-operator
+  namespace: spark-operator
+
+tests:
+  - it: Should not create spark RBAC resources if `spark.rbac.create` is false
+    set:
+      spark:
+        rbac:
+          create: false
+    asserts:
+      - hasDocuments:
+          count: 0
+
+  - it: Should create spark role by default
+    documentIndex: 0
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          name: spark-operator-spark
+
+  - it: Should create spark role binding by default
+    set:
+      spark:
+        rbac:
+          create: true
+    documentIndex: 1
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          name: spark-operator-spark
+
+  - it: Should create a single spark role with namespace "" by default
+    documentIndex: 0
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          name: spark-operator-spark
+
+  - it: Should create a single spark role binding with namespace "" by default
+    documentIndex: 1
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          name: spark-operator-spark
+          namespace: ""
+
+  - it: Should create multiple spark roles if `spark.jobNamespaces` is set with multiple values
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+    documentIndex: 0
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          name: spark-operator-spark
+          namespace: ns1
+
+  - it: Should create multiple spark role bindings if `spark.jobNamespaces` is set with multiple values
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+    documentIndex: 1
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          name: spark-operator-spark
+          namespace: ns1
+
+  - it: Should create multiple spark roles if `spark.jobNamespaces` is set with multiple values
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+    documentIndex: 2
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          name: spark-operator-spark
+          namespace: ns2
+
+  - it: Should create multiple spark role bindings if `spark.jobNamespaces` is set with multiple values
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+    documentIndex: 3
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          name: spark-operator-spark
+          namespace: ns2
diff --git a/charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml b/charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml
new file mode 100644
index 000000000..a1f1898b4
--- /dev/null
+++ b/charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml
@@ -0,0 +1,124 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +suite: Test spark service account + +templates: + - spark/serviceaccount.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not create service account if `spark.serviceAccount.create` is false + set: + spark: + serviceAccount: + create: false + asserts: + - hasDocuments: + count: 0 + + - it: Should create service account by default + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark-operator-spark + + - it: Should use the specified service account name if `spark.serviceAccount.name` is set + set: + spark: + serviceAccount: + name: spark + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + + - it: Should add extra annotations if `spark.serviceAccount.annotations` is set + set: + spark: + serviceAccount: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 + + - it: Should create multiple service accounts if `spark.jobNamespaces` is set + set: + spark: + serviceAccount: + name: spark + jobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 0 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns1 + + - it: Should create multiple service accounts if `spark.jobNamespaces` is set + set: + spark: + serviceAccount: + name: spark + jobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 1 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns2 + + - it: Should create multiple service accounts if `spark.jobNamespaces` is set + set: + spark: + serviceAccount: + name: spark + jobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 2 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns3 diff --git a/charts/spark-operator-chart/tests/webhook/deployment_test.yaml b/charts/spark-operator-chart/tests/webhook/deployment_test.yaml new file mode 100644 index 000000000..14c34f7a8 --- /dev/null +++ b/charts/spark-operator-chart/tests/webhook/deployment_test.yaml @@ -0,0 +1,504 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +suite: Test webhook deployment + +templates: + - webhook/deployment.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should create webhook deployment by default + asserts: + - containsDocument: + apiVersion: apps/v1 + kind: Deployment + name: spark-operator-webhook + + - it: Should set replicas if `webhook.replicas` is set + set: + webhook: + replicas: 10 + asserts: + - equal: + path: spec.replicas + value: 10 + + - it: Should add pod labels if `webhook.labels` is set + set: + webhook: + labels: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.labels.key1 + value: value1 + - equal: + path: spec.template.metadata.labels.key2 + value: value2 + + - it: Should add pod annotations if `webhook.annotations` is set + set: + webhook: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.annotations.key1 + value: value1 + - equal: + path: spec.template.metadata.annotations.key2 + value: value2 + + - it: Should use the specified image repository if `image.registry`, `image.repository` and `image.tag` are set + set: + image: + registry: test-registry + repository: test-repository + tag: test-tag + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: test-registry/test-repository:test-tag + + - it: Should use the specified image pull policy if `image.pullPolicy` is set + set: + image: + pullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[0].imagePullPolicy + value: Always + + - it: Should contain `--zap-log-level` arg if `webhook.logLevel` is set + set: + webhook: + logLevel: debug + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --zap-log-level=debug + + - it: Should contain `--namespaces` arg if `spark.jobNamespaces` is set + set: + spark.jobNamespaces: + - ns1 + - ns2 + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --namespaces=ns1,ns2 + + - it: Should contain `--enable-metrics` arg if `prometheus.metrics.enable` is set to `true` + set: + prometheus: + metrics: + enable: true + port: 12345 + portName: test-port + endpoint: /test-endpoint + prefix: test-prefix + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --enable-metrics=true + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --metrics-bind-address=:12345 + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --metrics-endpoint=/test-endpoint + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --metrics-prefix=test-prefix + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --metrics-labels=app_type + + - it: Should enable leader election by default + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --leader-election=true + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --leader-election-lock-name=spark-operator-webhook-lock + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --leader-election-lock-namespace=spark-operator + + - it: Should add webhook port + set: + webhook: + port: 12345 + portName: 
test-port + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: test-port + containerPort: 12345 + + - it: Should add metric port if `prometheus.metrics.enable` is true + set: + prometheus: + metrics: + enable: true + port: 10254 + portName: metrics + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: metrics + containerPort: 10254 + count: 1 + + - it: Should add environment variables if `webhook.env` is set + set: + webhook: + env: + - name: ENV_NAME_1 + value: ENV_VALUE_1 + - name: ENV_NAME_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: test-key + optional: false + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ENV_NAME_1 + value: ENV_VALUE_1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ENV_NAME_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: test-key + optional: false + + - it: Should add environment variable sources if `webhook.envFrom` is set + set: + webhook: + envFrom: + - configMapRef: + name: test-configmap + optional: false + - secretRef: + name: test-secret + optional: false + asserts: + - contains: + path: spec.template.spec.containers[0].envFrom + content: + configMapRef: + name: test-configmap + optional: false + - contains: + path: spec.template.spec.containers[0].envFrom + content: + secretRef: + name: test-secret + optional: false + + - it: Should add volume mounts if `webhook.volumeMounts` is set + set: + webhook: + volumeMounts: + - name: volume1 + mountPath: /volume1 + - name: volume2 + mountPath: /volume2 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume1 + mountPath: /volume1 + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume2 + mountPath: /volume2 + count: 1 + + - it: Should add resources if `webhook.resources` is set + set: + webhook: + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + asserts: + - equal: + path: spec.template.spec.containers[0].resources + value: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + + - it: Should add container securityContext if `webhook.securityContext` is set + set: + webhook: + securityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext.runAsUser + value: 1000 + - equal: + path: spec.template.spec.containers[0].securityContext.runAsGroup + value: 2000 + - equal: + path: spec.template.spec.containers[0].securityContext.fsGroup + value: 3000 + + - it: Should add sidecars if `webhook.sidecars` is set + set: + webhook: + sidecars: + - name: sidecar1 + image: sidecar-image1 + - name: sidecar2 + image: sidecar-image2 + asserts: + - contains: + path: spec.template.spec.containers + content: + name: sidecar1 + image: sidecar-image1 + - contains: + path: spec.template.spec.containers + content: + name: sidecar2 + image: sidecar-image2 + + - it: Should add secrets if `image.pullSecrets` is set + set: + image: + pullSecrets: + - name: test-secret1 + - name: test-secret2 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: test-secret1 + - equal: + path: spec.template.spec.imagePullSecrets[1].name + value: test-secret2 + + - it: Should add volumes if `webhook.volumes` is set + set: + webhook: + volumes: + - name: volume1 + emptyDir: {} + - name: volume2 + 
emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: volume1 + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: volume2 + emptyDir: {} + count: 1 + + - it: Should add nodeSelector if `webhook.nodeSelector` is set + set: + webhook: + nodeSelector: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.spec.nodeSelector.key1 + value: value1 + - equal: + path: spec.template.spec.nodeSelector.key2 + value: value2 + + - it: Should add affinity if `webhook.affinity` is set + set: + webhook: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + asserts: + - equal: + path: spec.template.spec.affinity + value: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + + - it: Should add tolerations if `webhook.tolerations` is set + set: + webhook: + tolerations: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + asserts: + - equal: + path: spec.template.spec.tolerations + value: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + + - it: Should add priorityClassName if `webhook.priorityClassName` is set + set: + webhook: + priorityClassName: test-priority-class + asserts: + - equal: + path: spec.template.spec.priorityClassName + value: test-priority-class + + - it: Should add pod securityContext if `webhook.podSecurityContext` is set + set: + webhook: + podSecurityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.securityContext.runAsUser + value: 1000 + - equal: + path: spec.template.spec.securityContext.runAsGroup + value: 2000 + - equal: + path: spec.template.spec.securityContext.fsGroup + value: 3000 + + - it: Should not contain topologySpreadConstraints if `webhook.topologySpreadConstraints` is not set + set: + webhook: + topologySpreadConstraints: [] + asserts: + - notExists: + path: spec.template.spec.topologySpreadConstraints + + - it: Should add topologySpreadConstraints if `webhook.topologySpreadConstraints` is set and `webhook.replicas` is greater than 1 + set: + webhook: + replicas: 2 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + asserts: + - equal: + path: spec.template.spec.topologySpreadConstraints + value: + - labelSelector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: spark-operator + app.kubernetes.io/name: spark-operator + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/component: webhook + 
app.kubernetes.io/instance: spark-operator
+                  app.kubernetes.io/name: spark-operator
+              maxSkew: 1
+              topologyKey: kubernetes.io/hostname
+              whenUnsatisfiable: DoNotSchedule
+
+  - it: Should fail if `webhook.topologySpreadConstraints` is set and `webhook.replicas` is not greater than 1
+    set:
+      webhook:
+        replicas: 1
+        topologySpreadConstraints:
+          - maxSkew: 1
+            topologyKey: topology.kubernetes.io/zone
+            whenUnsatisfiable: ScheduleAnyway
+          - maxSkew: 1
+            topologyKey: kubernetes.io/hostname
+            whenUnsatisfiable: DoNotSchedule
+    asserts:
+      - failedTemplate:
+          errorMessage: "webhook.replicas must be greater than 1 to enable topology spread constraints for webhook pods"
diff --git a/charts/spark-operator-chart/tests/webhook/mutatingwebhookconfiguration_test.yaml b/charts/spark-operator-chart/tests/webhook/mutatingwebhookconfiguration_test.yaml
new file mode 100644
index 000000000..54273df18
--- /dev/null
+++ b/charts/spark-operator-chart/tests/webhook/mutatingwebhookconfiguration_test.yaml
@@ -0,0 +1,78 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+suite: Test mutating webhook configuration
+
+templates:
+  - webhook/mutatingwebhookconfiguration.yaml
+
+release:
+  name: spark-operator
+  namespace: spark-operator
+
+tests:
+  - it: Should create the mutating webhook configuration by default
+    asserts:
+      - containsDocument:
+          apiVersion: admissionregistration.k8s.io/v1
+          kind: MutatingWebhookConfiguration
+          name: spark-operator-webhook
+
+  - it: Should use the specified webhook port
+    set:
+      webhook:
+        port: 12345
+    asserts:
+      - equal:
+          path: webhooks[*].clientConfig.service.port
+          value: 12345
+
+  - it: Should use the specified failure policy
+    set:
+      webhook:
+        failurePolicy: Fail
+    asserts:
+      - equal:
+          path: webhooks[*].failurePolicy
+          value: Fail
+
+  - it: Should set namespaceSelector if `spark.jobNamespaces` is not empty
+    set:
+      spark:
+        jobNamespaces:
+          - ns1
+          - ns2
+          - ns3
+    asserts:
+      - equal:
+          path: webhooks[*].namespaceSelector
+          value:
+            matchExpressions:
+              - key: kubernetes.io/metadata.name
+                operator: In
+                values:
+                  - ns1
+                  - ns2
+                  - ns3
+
+  - it: Should use the specified timeoutSeconds
+    set:
+      webhook:
+        timeoutSeconds: 5
+    asserts:
+      - equal:
+          path: webhooks[*].timeoutSeconds
+          value: 5
diff --git a/charts/spark-operator-chart/tests/webhook/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/webhook/poddisruptionbudget_test.yaml
new file mode 100644
index 000000000..f45350dbb
--- /dev/null
+++ b/charts/spark-operator-chart/tests/webhook/poddisruptionbudget_test.yaml
@@ -0,0 +1,68 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +suite: Test webhook pod disruption budget + +templates: + - webhook/poddisruptionbudget.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not render podDisruptionBudget if `webhook.podDisruptionBudget.enable` is false + set: + webhook: + podDisruptionBudget: + enable: false + asserts: + - hasDocuments: + count: 0 + + - it: Should fail if `webhook.replicas` is less than 2 when `webhook.podDisruptionBudget.enable` is true + set: + webhook: + replicas: 1 + podDisruptionBudget: + enable: true + asserts: + - failedTemplate: + errorMessage: "webhook.replicas must be greater than 1 to enable pod disruption budget for webhook" + + - it: Should render spark operator podDisruptionBudget if `webhook.podDisruptionBudget.enable` is true + set: + webhook: + replicas: 2 + podDisruptionBudget: + enable: true + asserts: + - containsDocument: + apiVersion: policy/v1 + kind: PodDisruptionBudget + name: spark-operator-webhook-pdb + + - it: Should set minAvailable if `webhook.podDisruptionBudget.minAvailable` is specified + set: + webhook: + replicas: 2 + podDisruptionBudget: + enable: true + minAvailable: 3 + asserts: + - equal: + path: spec.minAvailable + value: 3 diff --git a/charts/spark-operator-chart/tests/webhook/secret_test.yaml b/charts/spark-operator-chart/tests/webhook/secret_test.yaml deleted file mode 100644 index 0e9c3b4cf..000000000 --- a/charts/spark-operator-chart/tests/webhook/secret_test.yaml +++ /dev/null @@ -1,31 +0,0 @@ -suite: Test spark operator webhook secret - -templates: - - webhook/secret.yaml - -release: - name: spark-operator - namespace: spark-operator - -tests: - - it: Should not render the webhook secret if webhook.enable is false - asserts: - - hasDocuments: - count: 0 - - - it: Should render the webhook secret with empty data fields - set: - webhook: - enable: true - asserts: - - containsDocument: - apiVersion: v1 - kind: Secret - name: spark-operator-webhook-certs - - equal: - path: data - value: - ca-key.pem: "" - ca-cert.pem: "" - server-key.pem: "" - server-cert.pem: "" diff --git a/charts/spark-operator-chart/tests/webhook/service_test.yaml b/charts/spark-operator-chart/tests/webhook/service_test.yaml index d3b6b1cc2..c06631f97 100644 --- a/charts/spark-operator-chart/tests/webhook/service_test.yaml +++ b/charts/spark-operator-chart/tests/webhook/service_test.yaml @@ -1,24 +1,32 @@ -suite: Test spark operator webhook service +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+suite: Test webhook service
 
 templates:
   - webhook/service.yaml
 
 release:
   name: spark-operator
+  namespace: spark-operator
 
 tests:
-  - it: Should not render the webhook service if webhook.enable is false
-    set:
-      webhook:
-        enable: false
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should render the webhook service correctly if webhook.enable is true
+  - it: Should create the webhook service correctly
     set:
       webhook:
-        enable: true
         portName: webhook
     asserts:
       - containsDocument:
@@ -28,6 +36,6 @@ tests:
       - equal:
           path: spec.ports[0]
           value:
-            port: 443
+            port: 9443
             targetPort: webhook
             name: webhook
diff --git a/charts/spark-operator-chart/tests/webhook/validatingwebhookconfiguration_test.yaml b/charts/spark-operator-chart/tests/webhook/validatingwebhookconfiguration_test.yaml
new file mode 100644
index 000000000..9c7fa4daa
--- /dev/null
+++ b/charts/spark-operator-chart/tests/webhook/validatingwebhookconfiguration_test.yaml
@@ -0,0 +1,77 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+suite: Test validating webhook configuration
+
+templates:
+  - webhook/validatingwebhookconfiguration.yaml
+
+release:
+  name: spark-operator
+  namespace: spark-operator
+
+tests:
+  - it: Should create the validating webhook configuration by default
+    asserts:
+      - containsDocument:
+          apiVersion: admissionregistration.k8s.io/v1
+          kind: ValidatingWebhookConfiguration
+          name: spark-operator-webhook
+
+  - it: Should use the specified webhook port
+    set:
+      webhook:
+        port: 12345
+    asserts:
+      - equal:
+          path: webhooks[*].clientConfig.service.port
+          value: 12345
+
+  - it: Should use the specified failure policy
+    set:
+      webhook:
+        failurePolicy: Fail
+    asserts:
+      - equal:
+          path: webhooks[*].failurePolicy
+          value: Fail
+
+  - it: Should set namespaceSelector if `spark.jobNamespaces` is not empty
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+        - ns3
+    asserts:
+      - equal:
+          path: webhooks[*].namespaceSelector
+          value:
+            matchExpressions:
+              - key: kubernetes.io/metadata.name
+                operator: In
+                values:
+                  - ns1
+                  - ns2
+                  - ns3
+
+  - it: Should use the specified timeoutSeconds
+    set:
+      webhook:
+        timeoutSeconds: 5
+    asserts:
+      - equal:
+          path: webhooks[*].timeoutSeconds
+          value: 5
diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml
index 6eefe666b..a5adbe477 100644
--- a/charts/spark-operator-chart/values.yaml
+++ b/charts/spark-operator-chart/values.yaml
@@ -1,198 +1,328 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# + # Default values for spark-operator. # This is a YAML-formatted file. # Declare variables to be passed into your templates. -# -- Common labels to add to the resources -commonLabels: {} - -# replicaCount -- Desired number of pods, leaderElection will be enabled -# if this is greater than 1 -replicaCount: 1 - -image: - # -- Image repository - repository: docker.io/kubeflow/spark-operator - # -- Image pull policy - pullPolicy: IfNotPresent - # -- if set, override the image tag whose default is the chart appVersion. - tag: "" - -# -- Image pull secrets -imagePullSecrets: [] - -# -- String to partially override `spark-operator.fullname` template (will maintain the release name) +# -- String to partially override release name. nameOverride: "" -# -- String to override release name +# -- String to fully override release name. fullnameOverride: "" -rbac: - # -- **DEPRECATED** use `createRole` and `createClusterRole` - create: false - # -- Create and use RBAC `Role` resources - createRole: true - # -- Create and use RBAC `ClusterRole` resources - createClusterRole: true - # -- Optional annotations for rbac - annotations: {} +# -- Common labels to add to the resources. +commonLabels: {} -serviceAccounts: - spark: - # -- Create a service account for spark apps +image: + # -- Image registry. + registry: docker.io + # -- Image repository. + repository: kubeflow/spark-operator + # -- Image tag. + # @default -- If not set, the chart appVersion will be used. + tag: "" + # -- Image pull policy. + pullPolicy: IfNotPresent + # -- Image pull secrets for private image registry. + pullSecrets: [] + # - name: + +controller: + # -- Number of replicas of controller. + replicas: 1 + + # -- Reconcile concurrency, higher values might increase memory usage. + workers: 10 + + # -- Configure the verbosity of logging, can be one of `debug`, `info`, `error`. + logLevel: info + + uiService: + # -- Specifies whether to create service for Spark web UI. + enable: true + + uiIngress: + # -- Specifies whether to create ingress for Spark web UI. + # `controller.uiService.enable` must be `true` to enable ingress. + enable: false + # -- Ingress URL format. + # Required if `controller.uiIngress.enable` is true. + urlFormat: "" + + batchScheduler: + # -- Specifies whether to enable batch scheduler for spark jobs scheduling. + # If enabled, users can specify batch scheduler name in spark application. + enable: false + + serviceAccount: + # -- Specifies whether to create a service account for the controller. create: true - # -- Optional name for the spark service account + # -- Optional name for the controller service account. name: "" - # -- Optional annotations for the spark service account + # -- Extra annotations for the controller service account. annotations: {} - sparkoperator: - # -- Create a service account for the operator + + rbac: + # -- Specifies whether to create RBAC resources for the controller. create: true - # -- Optional name for the operator service account - name: "" - # -- Optional annotations for the operator service account + # -- Extra annotations for the controller RBAC resources. annotations: {} -# -- List of namespaces where to run spark jobs -sparkJobNamespaces: - - "" -# - ns1 - -# -- Operator concurrency, higher values might increase memory usage -controllerThreads: 10 + # -- Extra labels for controller pods. + labels: {} + # key1: value1 + # key2: value2 -# -- Operator resync interval. 
Note that the operator will respond to events (e.g. create, update)
-# unrelated to this setting
-resyncInterval: 30
+  # -- Extra labels for controller pods.
+  labels: {}
+  # key1: value1
+  # key2: value2
+
+  # -- Extra annotations for controller pods.
+  annotations: {}
+  # key1: value1
+  # key2: value2
+
+  # -- Volumes for controller pods.
+  volumes: []
+
+  # -- Node selector for controller pods.
+  nodeSelector: {}
+
+  # -- Affinity for controller pods.
+  affinity: {}
+
+  # -- List of node taints to tolerate for controller pods.
+  tolerations: []
+
+  # -- Priority class for controller pods.
+  priorityClassName: ""
+
+  # -- Security context for controller pods.
+  podSecurityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
+
+  # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
+  # Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/).
+  # The labelSelector field in topology spread constraint will be set to the selector labels for controller pods if not specified.
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: ScheduleAnyway
+  # - maxSkew: 1
+  #   topologyKey: kubernetes.io/hostname
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Environment variables for controller containers.
+  env: []
+
+  # -- Environment variable sources for controller containers.
+  envFrom: []
+
+  # -- Volume mounts for controller containers.
+  volumeMounts: []
+
+  # -- Pod resource requests and limits for controller containers.
+  # Note that each job submission will spawn a JVM within the controller pods using "/usr/local/openjdk-11/bin/java -Xmx128m".
+  # Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the error
+  # 'failed to run spark-submit for SparkApplication [...]: signal: killed'; if so, consider increasing the memory limits.
+  resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 300Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 300Mi
+
+  # -- Security context for controller containers.
+  securityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
+
+  # -- Sidecar containers for controller pods.
+  sidecars: []
+
+  # Pod disruption budget for controller to avoid service degradation.
+  podDisruptionBudget:
+    # -- Specifies whether to create pod disruption budget for controller.
+    # Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
+    enable: false
+    # -- The number of pods that must be available.
+    # Requires `controller.replicas` to be greater than 1.
+    minAvailable: 1
-uiService:
-  # -- Enable UI service creation for Spark application
-  enable: true
+webhook:
+  # -- Number of replicas of webhook server.
+  replicas: 1
-# -- Ingress URL format.
-# Requires the UI service to be enabled by setting `uiService.enable` to true.
-ingressUrlFormat: ""
+  # -- Configure the verbosity of logging, can be one of `debug`, `info`, `error`.
+  logLevel: info
-# -- Set higher levels for more verbose logging
-logLevel: 2
+  # -- Specifies webhook port.
+  port: 9443
-# -- Pod environment variable sources
-envFrom: []
+  # -- Specifies webhook service port name.
+  portName: webhook
-# podSecurityContext -- Pod security context
-podSecurityContext: {}
+  # -- Specifies how unrecognized errors are handled.
+  # Available options are `Ignore` or `Fail`.
+  failurePolicy: Fail
-# securityContext -- Operator container security context
-securityContext: {}
+  # -- Specifies the webhook timeout in seconds. The value must be between 1 and 30.
+  timeoutSeconds: 10
-# sidecars -- Sidecar containers
-sidecars: []
+  resourceQuotaEnforcement:
+    # -- Specifies whether to enable the ResourceQuota enforcement for SparkApplication resources.
+    enable: false
-# volumes - Operator volumes
-volumes: []
+  serviceAccount:
+    # -- Specifies whether to create a service account for the webhook.
+    create: true
+    # -- Optional name for the webhook service account.
+    name: ""
+    # -- Extra annotations for the webhook service account.
+    annotations: {}
-# volumeMounts - Operator volumeMounts
-volumeMounts: []
+  rbac:
+    # -- Specifies whether to create RBAC resources for the webhook.
+    create: true
+    # -- Extra annotations for the webhook RBAC resources.
+    annotations: {}
-webhook:
-  # -- Enable webhook server
-  enable: false
-  # -- Webhook service port
-  port: 8080
-  # -- Webhook container port name and service target port name
-  portName: webhook
-  # -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2.
-  # Empty string (default) will operate on all namespaces
-  namespaceSelector: ""
-  # -- The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2).
-  # Empty string (default) will operate on all objects
-  objectSelector: ""
-  # -- The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade
-  timeout: 30
-
-metrics:
-  # -- Enable prometheus metric scraping
-  enable: true
-  # -- Metrics port
-  port: 10254
-  # -- Metrics port name
-  portName: metrics
-  # -- Metrics serving endpoint
-  endpoint: /metrics
-  # -- Metric prefix, will be added to all exported metrics
-  prefix: ""
-
-# -- Prometheus pod monitor for operator's pod.
-podMonitor:
-  # -- If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well.
-  enable: false
-  # -- Pod monitor labels
+  # -- Extra labels for webhook pods.
   labels: {}
-  # -- The label to use to retrieve the job name from
-  jobLabel: spark-operator-podmonitor
-  # -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port
-  podMetricsEndpoint:
-    scheme: http
-    interval: 5s
-
-# -- podDisruptionBudget to avoid service degradation
-podDisruptionBudget:
-  # -- Specifies whether to enable pod disruption budget.
-  # Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
-  enable: false
-  # -- The number of pods that must be available.
-  # Require `replicaCount` to be greater than 1
-  minAvailable: 1
-
-# nodeSelector -- Node labels for pod assignment
-nodeSelector: {}
-
-# tolerations -- List of node taints to tolerate
-tolerations: []
-
-# affinity -- Affinity for pod assignment
-affinity: {}
-
-# podAnnotations -- Additional annotations to add to the pod
-podAnnotations: {}
-
-# podLabels -- Additional labels to add to the pod
-podLabels: {}
-
-# resources -- Pod resource requests and limits
-# Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m".
-# Kubernetes may kill these Java processes at will to enforce resource limits.
When that happens, you will see the following error: -# 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. -resources: {} - # limits: - # cpu: 100m - # memory: 300Mi - # requests: - # cpu: 100m - # memory: 300Mi - -batchScheduler: - # -- Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application - enable: false - -resourceQuotaEnforcement: - # -- Whether to enable the ResourceQuota enforcement for SparkApplication resources. - # Requires the webhook to be enabled by setting `webhook.enable` to true. - # Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. - enable: false - -leaderElection: - # -- Leader election lock name. - # Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. - lockName: "spark-operator-lock" - # -- Optionally store the lock in another namespace. Defaults to operator's namespace - lockNamespace: "" - -istio: - # -- When using `istio`, spark jobs need to run without a sidecar to properly terminate - enabled: false - -# labelSelectorFilter -- A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. -labelSelectorFilter: "" - -# priorityClassName -- A priority class to be used for running spark-operator pod. -priorityClassName: "" + # key1: value1 + # key2: value2 + + # -- Extra annotations for webhook pods. + annotations: {} + # key1: value1 + # key2: value2 + + # -- Sidecar containers for webhook pods. + sidecars: [] + + # -- Volumes for webhook pods. + volumes: [] + + # -- Node selector for webhook pods. + nodeSelector: {} + + # -- Affinity for webhook pods. + affinity: {} + + # -- List of node taints to tolerate for webhook pods. + tolerations: [] + + # -- Priority class for webhook pods. + priorityClassName: "" + + # -- Security context for webhook pods. + podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 2000 + # fsGroup: 3000 + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + # Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). + # The labelSelector field in topology spread constraint will be set to the selector labels for webhook pods if not specified. + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: ScheduleAnyway + # - maxSkew: 1 + # topologyKey: kubernetes.io/hostname + # whenUnsatisfiable: DoNotSchedule + + # -- Environment variables for webhook containers. + env: [] + + # -- Environment variable sources for webhook containers. + envFrom: [] + + # -- Volume mounts for webhook containers. + volumeMounts: [] + + # -- Pod resource requests and limits for webhook pods. + resources: {} + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 100m + # memory: 300Mi + + # -- Security context for webhook containers. + securityContext: {} + # runAsUser: 1000 + # runAsGroup: 2000 + # fsGroup: 3000 + + # Pod disruption budget for webhook to avoid service degradation. + podDisruptionBudget: + # -- Specifies whether to create pod disruption budget for webhook. 
+ # Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
+ enable: false
+ # -- The number of pods that must be available.
+ # Requires `webhook.replicas` to be greater than 1.
+ minAvailable: 1
+
+spark:
+ # -- List of namespaces in which to run spark jobs.
+ # If an empty string is included in the list, all namespaces are allowed.
+ # Make sure these namespaces already exist.
+ jobNamespaces:
+ - default
+
+ serviceAccount:
+ # -- Specifies whether to create a service account for spark applications.
+ create: true
+ # -- Optional name for the spark service account.
+ name: ""
+ # -- Optional annotations for the spark service account.
+ annotations: {}
+
+ rbac:
+ # -- Specifies whether to create RBAC resources for spark applications.
+ create: true
+ # -- Optional annotations for the spark application RBAC resources.
+ annotations: {}
+
+prometheus:
+ metrics:
+ # -- Specifies whether to enable Prometheus metrics scraping.
+ enable: true
+ # -- Metrics port.
+ port: 8080
+ # -- Metrics port name.
+ portName: metrics
+ # -- Metrics serving endpoint.
+ endpoint: /metrics
+ # -- Metrics prefix; it will be prepended to all exported metrics.
+ prefix: ""
+
+ # Prometheus pod monitor for controller pods.
+ podMonitor:
+ # -- Specifies whether to create a pod monitor.
+ # Note that Prometheus metrics should be enabled as well.
+ create: false
+ # -- Pod monitor labels.
+ labels: {}
+ # -- The label to use to retrieve the job name from.
+ jobLabel: spark-operator-podmonitor
+ # -- Prometheus metrics endpoint properties. `metrics.portName` will be used as the port.
+ podMetricsEndpoint:
+ scheme: http
+ interval: 5s
diff --git a/cmd/main.go b/cmd/main.go
new file mode 100644
index 000000000..38085497b
--- /dev/null
+++ b/cmd/main.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/kubeflow/spark-operator/cmd/operator"
+)
+
+func main() {
+ if err := operator.NewCommand().Execute(); err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ os.Exit(1)
+ }
+}
diff --git a/pkg/batchscheduler/interface/interface.go b/cmd/operator/controller/root.go
similarity index 60%
rename from pkg/batchscheduler/interface/interface.go
rename to cmd/operator/controller/root.go
index 6ed18c8cd..eeaa8edcd 100644
--- a/pkg/batchscheduler/interface/interface.go
+++ b/cmd/operator/controller/root.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2019 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,16 +14,20 @@ See the License for the specific language governing permissions and
 limitations under the License.
*/ -package schedulerinterface +package controller import ( - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/spf13/cobra" ) -type BatchScheduler interface { - Name() string - - ShouldSchedule(app *v1beta2.SparkApplication) bool - DoBatchSchedulingOnSubmission(app *v1beta2.SparkApplication) error - CleanupOnCompletion(app *v1beta2.SparkApplication) error +func NewCommand() *cobra.Command { + command := &cobra.Command{ + Use: "controller", + Short: "Spark operator controller", + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() + }, + } + command.AddCommand(NewStartCommand()) + return command } diff --git a/cmd/operator/controller/start.go b/cmd/operator/controller/start.go new file mode 100644 index 000000000..8fb54d7ea --- /dev/null +++ b/cmd/operator/controller/start.go @@ -0,0 +1,364 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "crypto/tls" + "flag" + "os" + "time" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/clock" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/healthz" + logzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" + + sparkoperator "github.com/kubeflow/spark-operator" + "github.com/kubeflow/spark-operator/api/v1beta1" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/controller/scheduledsparkapplication" + "github.com/kubeflow/spark-operator/internal/controller/sparkapplication" + "github.com/kubeflow/spark-operator/internal/metrics" + "github.com/kubeflow/spark-operator/internal/scheduler" + "github.com/kubeflow/spark-operator/internal/scheduler/volcano" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + logger = ctrl.Log.WithName("") +) + +var ( + namespaces []string + + // Controller + controllerThreads int + cacheSyncTimeout time.Duration + + // Batch scheduler + enableBatchScheduler bool + + // Spark web UI service and ingress + enableUIService bool + ingressClassName string + ingressURLFormat string + + // Leader election + enableLeaderElection bool + leaderElectionLockName string + leaderElectionLockNamespace string 
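+ // The duration fields below are bound to the --leader-election-lease-duration,
+ // --leader-election-renew-deadline, and --leader-election-retry-period flags
+ // registered in NewStartCommand.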
+ leaderElectionLeaseDuration time.Duration
+ leaderElectionRenewDeadline time.Duration
+ leaderElectionRetryPeriod time.Duration
+
+ // Metrics
+ enableMetrics bool
+ metricsBindAddress string
+ metricsEndpoint string
+ metricsPrefix string
+ metricsLabels []string
+ metricsJobStartLatencyBuckets []float64
+
+ healthProbeBindAddress string
+ secureMetrics bool
+ enableHTTP2 bool
+ development bool
+ zapOptions = logzap.Options{}
+)
+
+func init() {
+ utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+
+ utilruntime.Must(v1beta1.AddToScheme(scheme))
+ utilruntime.Must(v1beta2.AddToScheme(scheme))
+ // +kubebuilder:scaffold:scheme
+}
+
+func NewStartCommand() *cobra.Command {
+ var command = &cobra.Command{
+ Use: "start",
+ Short: "Start controller",
+ PreRun: func(_ *cobra.Command, args []string) {
+ development = viper.GetBool("development")
+ },
+ Run: func(_ *cobra.Command, args []string) {
+ sparkoperator.PrintVersion(false)
+ start()
+ },
+ }
+
+ command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
+ command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespaces to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.")
+ command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")
+
+ command.Flags().BoolVar(&enableBatchScheduler, "enable-batch-scheduler", false, "Enable batch schedulers.")
+ command.Flags().BoolVar(&enableUIService, "enable-ui-service", true, "Enable Spark Web UI service.")
+ command.Flags().StringVar(&ingressClassName, "ingress-class-name", "", "Set ingressClassName for ingress resources created.")
+ command.Flags().StringVar(&ingressURLFormat, "ingress-url-format", "", "Ingress URL format.")
+
+ command.Flags().BoolVar(&enableLeaderElection, "leader-election", false, "Enable leader election for controller manager. "+
+ "Enabling this will ensure there is only one active controller manager.")
+ command.Flags().StringVar(&leaderElectionLockName, "leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.")
+ command.Flags().StringVar(&leaderElectionLockNamespace, "leader-election-lock-namespace", "spark-operator", "Namespace in which to create the ConfigMap for leader election.")
+ command.Flags().DurationVar(&leaderElectionLeaseDuration, "leader-election-lease-duration", 15*time.Second, "Leader election lease duration.")
+ command.Flags().DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.")
+ command.Flags().DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 4*time.Second, "Leader election retry period.")
+
+ command.Flags().BoolVar(&enableMetrics, "enable-metrics", false, "Enable metrics.")
+ command.Flags().StringVar(&metricsBindAddress, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
+ "Use the port :8080. 
If not set, it will be 0 in order to disable the metrics server") + command.Flags().StringVar(&metricsEndpoint, "metrics-endpoint", "/metrics", "Metrics endpoint.") + command.Flags().StringVar(&metricsPrefix, "metrics-prefix", "", "Prefix for the metrics.") + command.Flags().StringSliceVar(&metricsLabels, "metrics-labels", []string{}, "Labels to be added to the metrics.") + command.Flags().Float64SliceVar(&metricsJobStartLatencyBuckets, "metrics-job-start-latency-buckets", []float64{30, 60, 90, 120, 150, 180, 210, 240, 270, 300}, "Buckets for the job start latency histogram.") + + command.Flags().StringVar(&healthProbeBindAddress, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + command.Flags().BoolVar(&secureMetrics, "secure-metrics", false, "If set the metrics endpoint is served securely") + command.Flags().BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers") + + flagSet := flag.NewFlagSet("controller", flag.ExitOnError) + ctrl.RegisterFlags(flagSet) + zapOptions.BindFlags(flagSet) + command.Flags().AddGoFlagSet(flagSet) + + return command +} + +func start() { + setupLog() + + // Create the client rest config. Use kubeConfig if given, otherwise assume in-cluster. + cfg, err := ctrl.GetConfig() + if err != nil { + logger.Error(err, "failed to get kube config") + os.Exit(1) + } + + // Create the manager. + tlsOptions := newTLSOptions() + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + Cache: newCacheOptions(), + Metrics: metricsserver.Options{ + BindAddress: metricsBindAddress, + SecureServing: secureMetrics, + TLSOpts: tlsOptions, + }, + WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{ + TLSOpts: tlsOptions, + }), + HealthProbeBindAddress: healthProbeBindAddress, + LeaderElection: enableLeaderElection, + LeaderElectionID: leaderElectionLockName, + LeaderElectionNamespace: leaderElectionLockNamespace, + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + logger.Error(err, "failed to create manager") + os.Exit(1) + } + + var registry *scheduler.Registry + if enableBatchScheduler { + registry = scheduler.GetRegistry() + + // Register volcano scheduler. + registry.Register(common.VolcanoSchedulerName, volcano.Factory) + } + + // Setup controller for SparkApplication. + if err = sparkapplication.NewReconciler( + mgr, + mgr.GetScheme(), + mgr.GetClient(), + mgr.GetEventRecorderFor("spark-application-controller"), + registry, + newSparkApplicationReconcilerOptions(), + ).SetupWithManager(mgr, newControllerOptions()); err != nil { + logger.Error(err, "Failed to create controller", "controller", "SparkApplication") + os.Exit(1) + } + + // Setup controller for ScheduledSparkApplication. 
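+ // Unlike the SparkApplication reconciler above, this one is constructed with
+ // a clock.RealClock so that cron schedules are evaluated against wall-clock
+ // time; a fake clock can presumably be injected through the same parameter
+ // in tests.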
+ if err = scheduledsparkapplication.NewReconciler( + mgr.GetScheme(), + mgr.GetClient(), + mgr.GetEventRecorderFor("scheduled-spark-application-controller"), + clock.RealClock{}, + newScheduledSparkApplicationReconcilerOptions(), + ).SetupWithManager(mgr, newControllerOptions()); err != nil { + logger.Error(err, "Failed to create controller", "controller", "ScheduledSparkApplication") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + logger.Error(err, "Failed to set up health check") + os.Exit(1) + } + + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + logger.Error(err, "Failed to set up ready check") + os.Exit(1) + } + + logger.Info("Starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + logger.Error(err, "Failed to start manager") + os.Exit(1) + } +} + +// setupLog Configures the logging system +func setupLog() { + ctrl.SetLogger(logzap.New( + logzap.UseFlagOptions(&zapOptions), + func(o *logzap.Options) { + o.Development = development + }, func(o *logzap.Options) { + o.ZapOpts = append(o.ZapOpts, zap.AddCaller()) + }, func(o *logzap.Options) { + var config zapcore.EncoderConfig + if !development { + config = zap.NewProductionEncoderConfig() + } else { + config = zap.NewDevelopmentEncoderConfig() + } + config.EncodeLevel = zapcore.CapitalColorLevelEncoder + config.EncodeTime = zapcore.ISO8601TimeEncoder + config.EncodeCaller = zapcore.ShortCallerEncoder + o.Encoder = zapcore.NewConsoleEncoder(config) + }), + ) +} + +func newTLSOptions() []func(c *tls.Config) { + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + logger.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + tlsOpts := []func(*tls.Config){} + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + return tlsOpts +} + +// newCacheOptions creates and returns a cache.Options instance configured with default namespaces and object caching settings. +func newCacheOptions() cache.Options { + defaultNamespaces := make(map[string]cache.Config) + if util.ContainsString(namespaces, cache.AllNamespaces) { + defaultNamespaces[cache.AllNamespaces] = cache.Config{} + } else { + for _, ns := range namespaces { + defaultNamespaces[ns] = cache.Config{} + } + } + + options := cache.Options{ + Scheme: scheme, + DefaultNamespaces: defaultNamespaces, + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: { + Label: labels.SelectorFromSet(labels.Set{ + common.LabelLaunchedBySparkOperator: "true", + }), + }, + &corev1.ConfigMap{}: {}, + &corev1.PersistentVolumeClaim{}: {}, + &corev1.Service{}: {}, + &v1beta2.SparkApplication{}: {}, + }, + } + + return options +} + +// newControllerOptions creates and returns a controller.Options instance configured with the given options. 
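+// Both reconcilers are wired with the same options, so --controller-threads
+// and --cache-sync-timeout apply to SparkApplication and
+// ScheduledSparkApplication alike. A typical invocation (flag values are
+// illustrative only) might look like:
+//
+//	spark-operator controller start --namespaces=spark --controller-threads=20 --enable-metrics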
+func newControllerOptions() controller.Options { + options := controller.Options{ + MaxConcurrentReconciles: controllerThreads, + CacheSyncTimeout: cacheSyncTimeout, + } + return options +} + +func newSparkApplicationReconcilerOptions() sparkapplication.Options { + var sparkApplicationMetrics *metrics.SparkApplicationMetrics + var sparkExecutorMetrics *metrics.SparkExecutorMetrics + if enableMetrics { + sparkApplicationMetrics = metrics.NewSparkApplicationMetrics(metricsPrefix, metricsLabels, metricsJobStartLatencyBuckets) + sparkApplicationMetrics.Register() + sparkExecutorMetrics = metrics.NewSparkExecutorMetrics(metricsPrefix, metricsLabels) + sparkExecutorMetrics.Register() + } + options := sparkapplication.Options{ + Namespaces: namespaces, + EnableUIService: enableUIService, + IngressClassName: ingressClassName, + IngressURLFormat: ingressURLFormat, + SparkApplicationMetrics: sparkApplicationMetrics, + SparkExecutorMetrics: sparkExecutorMetrics, + } + return options +} + +func newScheduledSparkApplicationReconcilerOptions() scheduledsparkapplication.Options { + options := scheduledsparkapplication.Options{ + Namespaces: namespaces, + } + return options +} diff --git a/cmd/operator/root.go b/cmd/operator/root.go new file mode 100644 index 000000000..2ddaa900d --- /dev/null +++ b/cmd/operator/root.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operator + +import ( + "github.com/spf13/cobra" + + "github.com/kubeflow/spark-operator/cmd/operator/controller" + "github.com/kubeflow/spark-operator/cmd/operator/version" + "github.com/kubeflow/spark-operator/cmd/operator/webhook" +) + +func NewCommand() *cobra.Command { + command := &cobra.Command{ + Use: "spark-operator", + Short: "Spark operator", + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() + }, + } + command.AddCommand(controller.NewCommand()) + command.AddCommand(webhook.NewCommand()) + command.AddCommand(version.NewCommand()) + return command +} diff --git a/pkg/controller/scheduledsparkapplication/controller_util.go b/cmd/operator/version/root.go similarity index 53% rename from pkg/controller/scheduledsparkapplication/controller_util.go rename to cmd/operator/version/root.go index 8cb33ab74..331bd612c 100644 --- a/pkg/controller/scheduledsparkapplication/controller_util.go +++ b/cmd/operator/version/root.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,23 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package scheduledsparkapplication +package version import ( - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -type sparkApps []*v1beta2.SparkApplication + "github.com/spf13/cobra" -func (s sparkApps) Len() int { - return len(s) -} + sparkoperator "github.com/kubeflow/spark-operator" +) -func (s sparkApps) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} +var ( + short bool +) -func (s sparkApps) Less(i, j int) bool { - // Sort by decreasing order of application names and correspondingly creation time. - return s[i].Name > s[j].Name +func NewCommand() *cobra.Command { + command := &cobra.Command{ + Use: "version", + Short: "Print version information", + RunE: func(cmd *cobra.Command, args []string) error { + sparkoperator.PrintVersion(short) + return nil + }, + } + command.Flags().BoolVar(&short, "short", false, "Print just the version string.") + return command } diff --git a/cmd/operator/webhook/root.go b/cmd/operator/webhook/root.go new file mode 100644 index 000000000..47609ea49 --- /dev/null +++ b/cmd/operator/webhook/root.go @@ -0,0 +1,33 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "github.com/spf13/cobra" +) + +func NewCommand() *cobra.Command { + command := &cobra.Command{ + Use: "webhook", + Short: "Spark operator webhook", + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() + }, + } + command.AddCommand(NewStartCommand()) + return command +} diff --git a/cmd/operator/webhook/start.go b/cmd/operator/webhook/start.go new file mode 100644 index 000000000..23ef7ae48 --- /dev/null +++ b/cmd/operator/webhook/start.go @@ -0,0 +1,410 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "context" + "crypto/tls" + "flag" + "os" + "time" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
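+ // Without this blank import, out-of-cluster kubeconfigs that rely on a
+ // cloud auth provider or exec plugin would typically fail at startup with
+ // an error along the lines of "no Auth Provider found"; the import
+ // registers the providers as an init-time side effect.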
+ _ "k8s.io/client-go/plugin/pkg/client/auth"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/wait"
+ clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ logzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
+ metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+ ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
+
+ sparkoperator "github.com/kubeflow/spark-operator"
+ "github.com/kubeflow/spark-operator/api/v1beta1"
+ "github.com/kubeflow/spark-operator/api/v1beta2"
+ "github.com/kubeflow/spark-operator/internal/controller/mutatingwebhookconfiguration"
+ "github.com/kubeflow/spark-operator/internal/controller/validatingwebhookconfiguration"
+ "github.com/kubeflow/spark-operator/internal/webhook"
+ "github.com/kubeflow/spark-operator/pkg/certificate"
+ "github.com/kubeflow/spark-operator/pkg/common"
+ "github.com/kubeflow/spark-operator/pkg/util"
+ // +kubebuilder:scaffold:imports
+)
+
+var (
+ scheme = runtime.NewScheme()
+ logger = ctrl.Log.WithName("")
+)
+
+var (
+ namespaces []string
+ labelSelectorFilter string
+
+ // Controller
+ controllerThreads int
+ cacheSyncTimeout time.Duration
+
+ // Webhook
+ enableResourceQuotaEnforcement bool
+ webhookCertDir string
+ webhookCertName string
+ webhookKeyName string
+ mutatingWebhookName string
+ validatingWebhookName string
+ webhookPort int
+ webhookSecretName string
+ webhookSecretNamespace string
+ webhookServiceName string
+ webhookServiceNamespace string
+
+ // Leader election
+ enableLeaderElection bool
+ leaderElectionLockName string
+ leaderElectionLockNamespace string
+ leaderElectionLeaseDuration time.Duration
+ leaderElectionRenewDeadline time.Duration
+ leaderElectionRetryPeriod time.Duration
+
+ // Metrics
+ enableMetrics bool
+ metricsBindAddress string
+ metricsEndpoint string
+ metricsPrefix string
+ metricsLabels []string
+
+ healthProbeBindAddress string
+ secureMetrics bool
+ enableHTTP2 bool
+ development bool
+ zapOptions = logzap.Options{}
+)
+
+func init() {
+ utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+
+ utilruntime.Must(v1beta1.AddToScheme(scheme))
+ utilruntime.Must(v1beta2.AddToScheme(scheme))
+ // +kubebuilder:scaffold:scheme
+}
+
+func NewStartCommand() *cobra.Command {
+ var command = &cobra.Command{
+ Use: "start",
+ Short: "Start webhook",
+ PreRun: func(_ *cobra.Command, args []string) {
+ development = viper.GetBool("development")
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ sparkoperator.PrintVersion(false)
+ start()
+ },
+ }
+
+ command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.")
+ command.Flags().StringSliceVar(&namespaces, "namespaces", []string{"default"}, "The Kubernetes namespaces to manage. 
Will manage custom resource objects of the managed CRD types for the whole cluster if unset.")
+ command.Flags().StringVar(&labelSelectorFilter, "label-selector-filter", "", "A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.")
+ command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.")
+
+ command.Flags().StringVar(&webhookCertDir, "webhook-cert-dir", "/etc/k8s-webhook-server/serving-certs", "The directory that contains the webhook server key and certificate.")
+ command.Flags().StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The file name of webhook server certificate.")
+ command.Flags().StringVar(&webhookKeyName, "webhook-key-name", "tls.key", "The file name of webhook server key.")
+ command.Flags().StringVar(&mutatingWebhookName, "mutating-webhook-name", "spark-operator-webhook", "The name of the mutating webhook.")
+ command.Flags().StringVar(&validatingWebhookName, "validating-webhook-name", "spark-operator-webhook", "The name of the validating webhook.")
+ command.Flags().IntVar(&webhookPort, "webhook-port", 9443, "Service port of the webhook server.")
+ command.Flags().StringVar(&webhookSecretName, "webhook-secret-name", "spark-operator-webhook-certs", "The name of the secret that contains the webhook server's TLS certificate and key.")
+ command.Flags().StringVar(&webhookSecretNamespace, "webhook-secret-namespace", "spark-operator", "The namespace of the secret that contains the webhook server's TLS certificate and key.")
+ command.Flags().StringVar(&webhookServiceName, "webhook-svc-name", "spark-webhook", "The name of the Service for the webhook server.")
+ command.Flags().StringVar(&webhookServiceNamespace, "webhook-svc-namespace", "spark-webhook", "The namespace of the Service for the webhook server.")
+ command.Flags().BoolVar(&enableResourceQuotaEnforcement, "enable-resource-quota-enforcement", false, "Whether to enable ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled.")
+
+ command.Flags().BoolVar(&enableLeaderElection, "leader-election", false, "Enable leader election for controller manager. "+
+ "Enabling this will ensure there is only one active controller manager.")
+ command.Flags().StringVar(&leaderElectionLockName, "leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.")
+ command.Flags().StringVar(&leaderElectionLockNamespace, "leader-election-lock-namespace", "spark-operator", "Namespace in which to create the ConfigMap for leader election.")
+ command.Flags().DurationVar(&leaderElectionLeaseDuration, "leader-election-lease-duration", 15*time.Second, "Leader election lease duration.")
+ command.Flags().DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.")
+ command.Flags().DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 4*time.Second, "Leader election retry period.")
+
+ command.Flags().BoolVar(&enableMetrics, "enable-metrics", false, "Enable metrics.")
+ command.Flags().StringVar(&metricsBindAddress, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
+ "Use the port :8080. 
If not set, it will be 0 in order to disable the metrics server") + command.Flags().StringVar(&metricsEndpoint, "metrics-endpoint", "/metrics", "Metrics endpoint.") + command.Flags().StringVar(&metricsPrefix, "metrics-prefix", "", "Prefix for the metrics.") + command.Flags().StringSliceVar(&metricsLabels, "metrics-labels", []string{}, "Labels to be added to the metrics.") + + command.Flags().StringVar(&healthProbeBindAddress, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + command.Flags().BoolVar(&secureMetrics, "secure-metrics", false, "If set the metrics endpoint is served securely") + command.Flags().BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers") + + flagSet := flag.NewFlagSet("controller", flag.ExitOnError) + ctrl.RegisterFlags(flagSet) + zapOptions.BindFlags(flagSet) + command.Flags().AddGoFlagSet(flagSet) + + return command +} + +func start() { + setupLog() + + // Create the client rest config. Use kubeConfig if given, otherwise assume in-cluster. + cfg, err := ctrl.GetConfig() + if err != nil { + logger.Error(err, "failed to get kube config") + os.Exit(1) + } + + // Create the manager. + tlsOptions := newTLSOptions() + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + Cache: newCacheOptions(), + Metrics: metricsserver.Options{ + BindAddress: metricsBindAddress, + SecureServing: secureMetrics, + TLSOpts: tlsOptions, + }, + WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{ + Port: webhookPort, + CertDir: webhookCertDir, + CertName: webhookCertName, + TLSOpts: tlsOptions, + }), + HealthProbeBindAddress: healthProbeBindAddress, + LeaderElection: enableLeaderElection, + LeaderElectionID: leaderElectionLockName, + LeaderElectionNamespace: leaderElectionLockNamespace, + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. 
+ // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + logger.Error(err, "Failed to create manager") + os.Exit(1) + } + + client, err := client.New(cfg, client.Options{Scheme: mgr.GetScheme()}) + if err != nil { + logger.Error(err, "Failed to create client") + os.Exit(1) + } + + certProvider := certificate.NewProvider( + client, + webhookServiceName, + webhookServiceNamespace, + ) + + if err := wait.ExponentialBackoff( + wait.Backoff{ + Steps: 5, + Duration: 1 * time.Second, + Factor: 2.0, + Jitter: 0.1, + }, + func() (bool, error) { + logger.Info("Syncing webhook secret", "name", webhookSecretName, "namespace", webhookSecretNamespace) + if err := certProvider.SyncSecret(context.TODO(), webhookSecretName, webhookSecretNamespace); err != nil { + if errors.IsAlreadyExists(err) || errors.IsConflict(err) { + return false, nil + } + return false, err + } + return true, nil + }, + ); err != nil { + logger.Error(err, "Failed to sync webhook secret") + os.Exit(1) + } + + logger.Info("Writing certificates", "path", webhookCertDir, "certificate name", webhookCertName, "key name", webhookKeyName) + if err := certProvider.WriteFile(webhookCertDir, webhookCertName, webhookKeyName); err != nil { + logger.Error(err, "Failed to save certificate") + os.Exit(1) + } + + if err := mutatingwebhookconfiguration.NewReconciler( + mgr.GetClient(), + certProvider, + mutatingWebhookName, + ).SetupWithManager(mgr, controller.Options{}); err != nil { + logger.Error(err, "Failed to create controller", "controller", "MutatingWebhookConfiguration") + os.Exit(1) + } + + if err := validatingwebhookconfiguration.NewReconciler( + mgr.GetClient(), + certProvider, + validatingWebhookName, + ).SetupWithManager(mgr, controller.Options{}); err != nil { + logger.Error(err, "Failed to create controller", "controller", "ValidatingWebhookConfiguration") + os.Exit(1) + } + + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta2.SparkApplication{}). + WithDefaulter(webhook.NewSparkApplicationDefaulter()). + WithValidator(webhook.NewSparkApplicationValidator(mgr.GetClient(), enableResourceQuotaEnforcement)). + Complete(); err != nil { + logger.Error(err, "Failed to create mutating webhook for Spark application") + os.Exit(1) + } + + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta2.ScheduledSparkApplication{}). + WithDefaulter(webhook.NewScheduledSparkApplicationDefaulter()). + WithValidator(webhook.NewScheduledSparkApplicationValidator()). + Complete(); err != nil { + logger.Error(err, "Failed to create mutating webhook for Scheduled Spark application") + os.Exit(1) + } + + if err := ctrl.NewWebhookManagedBy(mgr). + For(&corev1.Pod{}). + WithDefaulter(webhook.NewSparkPodDefaulter(mgr.GetClient(), namespaces)). 
+ Complete(); err != nil { + logger.Error(err, "Failed to create mutating webhook for Spark pod") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", mgr.GetWebhookServer().StartedChecker()); err != nil { + logger.Error(err, "Failed to set up health check") + os.Exit(1) + } + + if err := mgr.AddReadyzCheck("readyz", mgr.GetWebhookServer().StartedChecker()); err != nil { + logger.Error(err, "Failed to set up ready check") + os.Exit(1) + } + + logger.Info("Starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + logger.Error(err, "Failed to start manager") + os.Exit(1) + } +} + +// setupLog Configures the logging system +func setupLog() { + ctrl.SetLogger(logzap.New( + logzap.UseFlagOptions(&zapOptions), + func(o *logzap.Options) { + o.Development = development + }, func(o *logzap.Options) { + o.ZapOpts = append(o.ZapOpts, zap.AddCaller()) + }, func(o *logzap.Options) { + var config zapcore.EncoderConfig + if !development { + config = zap.NewProductionEncoderConfig() + } else { + config = zap.NewDevelopmentEncoderConfig() + } + config.EncodeLevel = zapcore.CapitalColorLevelEncoder + config.EncodeTime = zapcore.ISO8601TimeEncoder + config.EncodeCaller = zapcore.ShortCallerEncoder + o.Encoder = zapcore.NewConsoleEncoder(config) + }), + ) +} + +func newTLSOptions() []func(c *tls.Config) { + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + logger.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + tlsOpts := []func(*tls.Config){} + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + return tlsOpts +} + +// newCacheOptions creates and returns a cache.Options instance configured with default namespaces and object caching settings. 
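+// Compared with the controller's cache, the webhook cache additionally
+// restricts MutatingWebhookConfiguration and ValidatingWebhookConfiguration
+// objects to the single resource named by --mutating-webhook-name and
+// --validating-webhook-name via field selectors, and caches ResourceQuota
+// objects only when --enable-resource-quota-enforcement is set.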
+func newCacheOptions() cache.Options { + defaultNamespaces := make(map[string]cache.Config) + if util.ContainsString(namespaces, cache.AllNamespaces) { + defaultNamespaces[cache.AllNamespaces] = cache.Config{} + } else { + for _, ns := range namespaces { + defaultNamespaces[ns] = cache.Config{} + } + } + + byObject := map[client.Object]cache.ByObject{ + &corev1.Pod{}: { + Label: labels.SelectorFromSet(labels.Set{ + common.LabelLaunchedBySparkOperator: "true", + }), + }, + &v1beta2.SparkApplication{}: {}, + &v1beta2.ScheduledSparkApplication{}: {}, + &admissionregistrationv1.MutatingWebhookConfiguration{}: { + Field: fields.SelectorFromSet(fields.Set{ + "metadata.name": mutatingWebhookName, + }), + }, + &admissionregistrationv1.ValidatingWebhookConfiguration{}: { + Field: fields.SelectorFromSet(fields.Set{ + "metadata.name": validatingWebhookName, + }), + }, + } + + if enableResourceQuotaEnforcement { + byObject[&corev1.ResourceQuota{}] = cache.ByObject{} + } + + options := cache.Options{ + Scheme: scheme, + DefaultNamespaces: defaultNamespaces, + ByObject: byObject, + } + + return options +} diff --git a/codecov.yaml b/codecov.yaml new file mode 100644 index 000000000..4e7d7af67 --- /dev/null +++ b/codecov.yaml @@ -0,0 +1,10 @@ +coverage: + status: + project: + default: + threshold: 0.1% + patch: + default: + target: 60% +ignore: + - "**/*_generated.*" diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 000000000..fbfd47519 --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,35 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
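+# NOTE: the operator's webhook command self-provisions its serving certificate
+# at startup (see the certificate provider logic in cmd/operator/webhook/start.go),
+# so this cert-manager configuration is presumably only needed for the
+# kustomize-based deployment path.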
+apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app.kubernetes.io/name: certificate + app.kubernetes.io/instance: serving-cert + app.kubernetes.io/component: certificate + app.kubernetes.io/created-by: spark-operator + app.kubernetes.io/part-of: spark-operator + app.kubernetes.io/managed-by: kustomize + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize + dnsNames: + - SERVICE_NAME.SERVICE_NAMESPACE.svc + - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 000000000..bebea5a59 --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 000000000..cf6f89e88 --- /dev/null +++ b/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,8 @@ +# This configuration is for teaching kustomize how to update name ref substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name diff --git a/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml new file mode 100644 index 000000000..7f77e1bb9 --- /dev/null +++ b/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -0,0 +1,11614 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 + controller-gen.kubebuilder.io/version: v0.15.0 + name: scheduledsparkapplications.sparkoperator.k8s.io +spec: + group: sparkoperator.k8s.io + names: + kind: ScheduledSparkApplication + listKind: ScheduledSparkApplicationList + plural: scheduledsparkapplications + shortNames: + - scheduledsparkapp + singular: scheduledsparkapplication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.schedule + name: Schedule + type: string + - jsonPath: .spec.suspend + name: Suspend + type: string + - jsonPath: .status.lastRun + name: Last Run + type: date + - jsonPath: .status.lastRunName + name: Last Run Name + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ScheduledSparkApplication is the Schema for the scheduledsparkapplications + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScheduledSparkApplicationSpec defines the desired state of + ScheduledSparkApplication. + properties: + concurrencyPolicy: + description: ConcurrencyPolicy is the policy governing concurrent + SparkApplication runs. + type: string + failedRunHistoryLimit: + description: |- + FailedRunHistoryLimit is the number of past failed runs of the application to keep. + Defaults to 1. + format: int32 + type: integer + schedule: + description: Schedule is a cron schedule on which the application + should run. + type: string + successfulRunHistoryLimit: + description: |- + SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + Defaults to 1. + format: int32 + type: integer + suspend: + description: |- + Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + Defaults to false. + type: boolean + template: + description: Template is a template from which SparkApplication instances + can be created. + properties: + arguments: + description: Arguments is a list of arguments to be passed to + the application. + items: + type: string + type: array + batchScheduler: + description: BatchScheduler configures which batch scheduler will + be used for scheduling + type: string + batchSchedulerOptions: + description: BatchSchedulerOptions provides fine-grained control + on how to batch scheduling. + properties: + priorityClassName: + description: PriorityClassName stands for the name of k8s + PriorityClass resource, it's being used in Volcano batch + scheduler. + type: string + queue: + description: Queue stands for the resource queue which the + application belongs to, it's being used in Volcano batch + scheduler. + type: string + resources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. + If specified, volcano scheduler will consider it as the resources requested. + type: object + type: object + deps: + description: Deps captures all possible types of dependencies + of a Spark application. + properties: + excludePackages: + description: |- + ExcludePackages is a list of "groupId:artifactId", to exclude while resolving the + dependencies provided in Packages to avoid dependency conflicts. + items: + type: string + type: array + files: + description: Files is a list of files the Spark application + depends on. + items: + type: string + type: array + jars: + description: Jars is a list of JAR files the Spark application + depends on. + items: + type: string + type: array + packages: + description: |- + Packages is a list of maven coordinates of jars to include on the driver and executor + classpaths. 
This will search the local maven repo, then maven central and any additional + remote repositories given by the "repositories" option. + Each package should be of the form "groupId:artifactId:version". + items: + type: string + type: array + pyFiles: + description: PyFiles is a list of Python files the Spark application + depends on. + items: + type: string + type: array + repositories: + description: |- + Repositories is a list of additional remote repositories to search for the maven coordinate + given with the "packages" option. + items: + type: string + type: array + type: object + driver: + description: Driver is the driver specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity + settings for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. 
+ co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to + be added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. 
+ items: + description: NamePath is a pair of a name and a path to + which the named objects should be mounted to. + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the driver. + Maps to `spark.kubernetes.driver.request.cores` that is available since Spark 3.0. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + dnsConfig: + description: DnsConfig dns settings for the pod, following + the Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add + to the pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. 
+ type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request + for driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the + Kubernetes specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host + networking for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides + Spec.Image if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
+ Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. 
+ format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. 
It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. 
+ properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the driver. For instance, + GC settings or other logging. + type: string + kubernetesMaster: + description: |- + KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and + other Kubernetes resources. Default to https://kubernetes.default.svc. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added + to the pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for + the pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory + to allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podName: + description: |- + PodName is the name of the driver pod that the user creates. This is used for the + in-cluster client mode in which the user creates a client pod where the driver of + the user application runs. It's an error to set this field if Mode is not + in-cluster-client. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. 
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. 
If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the + pods objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will + be used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add + to the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: |- + ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + serviceLabels: + additionalProperties: + type: string + description: |- + ServiceLabels defines the labels to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. 
+ type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that + run along side the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
+ properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Defaults to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
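+          # NOTE: A minimal, hypothetical hardening sketch using the securityContext
+          # fields documented above; the values are illustrative, not defaults of
+          # this chart:
+          #
+          #   securityContext:
+          #     runAsNonRoot: true
+          #     allowPrivilegeEscalation: false
+          #     readOnlyRootFilesystem: true
+          #     capabilities:
+          #       drop: ["ALL"]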
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Defaults to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime.
If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed + in ".spec.tolerations" to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in + ".spec.volumes" to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + driverIngressOptions: + description: DriverIngressOptions allows configuring the Service + and the Ingress to expose ports inside the Spark driver. + items: + description: DriverIngressConfiguration is for driver ingress + specific configuration parameters. + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key/value pairs + of annotations that might be added to the ingress object, + e.g. to specify nginx as the ingress.class. + type: object + ingressTLS: + description: IngressTLS is useful if we need to declare SSL + certificates for the ingress object. + items: + description: IngressTLS describes the transport layer + security associated with an ingress. + properties: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + ingressURLFormat: + description: IngressURLFormat is the URL for the ingress. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key/value pairs + of annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key/value pairs of + labels that might be added to the service object. + type: object + servicePort: + description: ServicePort allows configuring the port at + the service level that might be different from the targetPort. + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP. + type: string + serviceType: + description: ServiceType allows configuring the type of + the service. Defaults to ClusterIP. + type: string + required: + - servicePort + - servicePortName + type: object + type: array + dynamicAllocation: + description: |- + DynamicAllocation configures dynamic allocation, which is available for the Kubernetes + scheduler backend since Spark 3.0. + properties: + enabled: + description: Enabled controls whether dynamic allocation is + enabled or not. + type: boolean + initialExecutors: + description: |- + InitialExecutors is the initial number of executors to request. If .spec.executor.instances + is also set, the initial number of executors is set to the bigger of that and this option. + format: int32 + type: integer + maxExecutors: + description: MaxExecutors is the upper bound for the number + of executors if dynamic allocation is enabled.
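+          # NOTE: A hypothetical sketch combining the two spec-level fields above;
+          # the port values and the URL format are illustrative only:
+          #
+          #   spec:
+          #     driverIngressOptions:
+          #     - servicePort: 4040
+          #       servicePortName: spark-ui
+          #       serviceType: ClusterIP
+          #       ingressURLFormat: "{{$appName}}.example.com"
+          #     dynamicAllocation:
+          #       enabled: true
+          #       initialExecutors: 2
+          #       minExecutors: 2
+          #       maxExecutors: 10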
+ format: int32 + type: integer + minExecutors: + description: MinExecutors is the lower bound for the number + of executors if dynamic allocation is enabled. + format: int32 + type: integer + shuffleTrackingTimeout: + description: |- + ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding + shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled). + format: int64 + type: integer + type: object + executor: + description: Executor is the executor specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity + settings for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty.
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)).
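+          # NOTE: A hypothetical sketch of executor node affinity using the schema
+          # above; the node label key and instance types are illustrative only:
+          #
+          #   spec:
+          #     executor:
+          #       affinity:
+          #         nodeAffinity:
+          #           requiredDuringSchedulingIgnoredDuringExecution:
+          #             nodeSelectorTerms:
+          #             - matchExpressions:
+          #               - key: node.kubernetes.io/instance-type
+          #                 operator: In
+          #                 values: ["r5.2xlarge", "r5.4xlarge"]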
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
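+          # NOTE: A hypothetical sketch of preferred executor anti-affinity that
+          # spreads executors across nodes; spark-role is the label Spark sets on
+          # executor pods, but treat the whole snippet as illustrative:
+          #
+          #   affinity:
+          #     podAntiAffinity:
+          #       preferredDuringSchedulingIgnoredDuringExecution:
+          #       - weight: 100
+          #         podAffinityTerm:
+          #           topologyKey: kubernetes.io/hostname
+          #           labelSelector:
+          #             matchLabels:
+          #               spark-role: executor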
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to + be added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. 
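+ # Illustrative only (not part of the generated schema): a minimal sketch of
+ # how the anti-affinity fields above are typically set on a SparkApplication,
+ # assuming executor pods carry the conventional `spark-role: executor` label;
+ # the weight and topology key are examples, not defaults.
+ #   spec:
+ #     executor:
+ #       affinity:
+ #         podAntiAffinity:
+ #           preferredDuringSchedulingIgnoredDuringExecution:
+ #           - weight: 100
+ #             podAffinityTerm:
+ #               topologyKey: kubernetes.io/hostname
+ #               labelSelector:
+ #                 matchLabels:
+ #                   spark-role: executor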
+ items:
+ description: NamePath is a pair of a name and a path to
+ which the named objects should be mounted.
+ properties:
+ name:
+ type: string
+ path:
+ type: string
+ required:
+ - name
+ - path
+ type: object
+ type: array
+ coreLimit:
+ description: |-
+ CoreLimit specifies a hard limit on CPU cores for the pod.
+ Optional
+ type: string
+ coreRequest:
+ description: |-
+ CoreRequest is the physical CPU core request for the executors.
+ Maps to `spark.kubernetes.executor.request.cores` that is available since Spark 2.4.
+ type: string
+ cores:
+ description: Cores maps to `spark.driver.cores` or `spark.executor.cores`
+ for the driver and executors, respectively.
+ format: int32
+ minimum: 1
+ type: integer
+ deleteOnTermination:
+ description: |-
+ DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination.
+ Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0.
+ type: boolean
+ dnsConfig:
+ description: DnsConfig specifies DNS settings for the pod,
+ following the Kubernetes specifications.
+ properties:
+ nameservers:
+ description: |-
+ A list of DNS name server IP addresses.
+ This will be appended to the base nameservers generated from DNSPolicy.
+ Duplicated nameservers will be removed.
+ items:
+ type: string
+ type: array
+ options:
+ description: |-
+ A list of DNS resolver options.
+ This will be merged with the base options generated from DNSPolicy.
+ Duplicated entries will be removed. Resolution options given in Options
+ will override those that appear in the base DNSPolicy.
+ items:
+ description: PodDNSConfigOption defines DNS resolver
+ options of a pod.
+ properties:
+ name:
+ description: Required.
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ description: |-
+ A list of DNS search domains for host-name lookup.
+ This will be appended to the base search paths generated from DNSPolicy.
+ Duplicated search paths will be removed.
+ items:
+ type: string
+ type: array
+ type: object
+ env:
+ description: Env carries the environment variables to add
+ to the pod.
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must
+ be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
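+ # Illustrative only: a hedged sketch combining the CPU and DNS fields defined
+ # above; the quantity strings and resolver values are examples.
+ #   spec:
+ #     executor:
+ #       cores: 2                # spark.executor.cores
+ #       coreRequest: "1500m"    # spark.kubernetes.executor.request.cores (Spark 2.4+)
+ #       coreLimit: "2000m"      # hard CPU limit for the pod
+ #       dnsConfig:
+ #         nameservers: ["10.0.0.10"]
+ #         searches: ["svc.cluster.local"]
+ #         options:
+ #         - name: ndots
+ #           value: "2"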
+ type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. 
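+ # Illustrative only: `env` and `envFrom` as defined above; the ConfigMap and
+ # Secret names are hypothetical. Prefer these over the deprecated
+ # `envVars`/`envSecretKeyRefs` fields below.
+ #   spec:
+ #     executor:
+ #       env:
+ #       - name: LOG_LEVEL
+ #         value: INFO
+ #       - name: DB_PASSWORD
+ #         valueFrom:
+ #           secretKeyRef:
+ #             name: db-credentials
+ #             key: password
+ #       envFrom:
+ #       - configMapRef:
+ #           name: spark-env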
+ properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request + for driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the + Kubernetes specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host + networking for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides + Spec.Image if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
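+ # Illustrative only: the GPU, host alias, and image fields from this block;
+ # the resource name, addresses, and registry are examples.
+ #   spec:
+ #     executor:
+ #       gpu:
+ #         name: nvidia.com/gpu
+ #         quantity: 1
+ #       hostAliases:
+ #       - ip: "10.1.2.3"
+ #         hostnames: ["metastore.internal"]
+ #       hostNetwork: false
+ #       image: my-registry/spark:3.5.0   # overrides spec.image if set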
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
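+ # Illustrative only: an init-container using the lifecycle hooks described
+ # above; the image, endpoint, and commands are hypothetical.
+ #   spec:
+ #     executor:
+ #       initContainers:
+ #       - name: wait-for-metastore
+ #         image: busybox:1.36
+ #         command: ["sh", "-c", "until nc -z metastore 9083; do sleep 2; done"]
+ #         lifecycle:
+ #           postStart:
+ #             httpGet:
+ #               path: /healthz
+ #               port: 8080
+ #               scheme: HTTP
+ #           preStop:
+ #             exec:
+ #               command: ["sh", "-c", "echo draining"]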
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. 
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
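+ # Illustrative only, continuing the init-container entry sketched earlier:
+ # container ports plus a readiness probe wired to one of them, using the
+ # probe fields defined above; the port name, number, and path are examples.
+ #         ports:
+ #         - name: http-metrics
+ #           containerPort: 8090
+ #           protocol: TCP
+ #         readinessProbe:
+ #           httpGet:
+ #             path: /ready
+ #             port: http-metrics
+ #           periodSeconds: 10
+ #           failureThreshold: 3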
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. 
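+ # Illustrative only: a restrictive container securityContext built from the
+ # fields above; UID 185 is the `spark` user in the stock Apache Spark images
+ # (an assumption about the image in use).
+ #         securityContext:
+ #           runAsNonRoot: true
+ #           runAsUser: 185
+ #           allowPrivilegeEscalation: false
+ #           readOnlyRootFilesystem: true
+ #           capabilities:
+ #             drop: ["ALL"]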
+ type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
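+ # Illustrative only: a startupProbe that tolerates a slow warm-up before the
+ # liveness/readiness probes take over; thresholds are examples
+ # (30 failures x 10s allows up to ~300s to start).
+ #         startupProbe:
+ #           tcpSocket:
+ #             port: 8080
+ #           failureThreshold: 30
+ #           periodSeconds: 10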
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. 
+ properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + instances: + description: Instances is the number of executor instances. + format: int32 + minimum: 1 + type: integer + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the executors. For instance, + GC settings or other logging. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added + to the pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when a TCP handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when a TCP handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for + the pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory + to allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership (and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir.
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. 
If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the + pod objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will + be used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add + to the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process.
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represents POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represents POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container.
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that + run alongside the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged.
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when a TCP handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when a TCP handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take.
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Defaults to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Defaults to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represents POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represents POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext.
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
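+ # --- Editor's illustrative note (not part of the generated schema) ---
+ # A hardened container securityContext built only from fields documented above;
+ # a minimal sketch, with values chosen for illustration:
+ #
+ #   securityContext:
+ #     runAsNonRoot: true
+ #     allowPrivilegeEscalation: false
+ #     readOnlyRootFilesystem: true
+ #     capabilities:
+ #       drop:
+ #         - ALL
+ #     seccompProfile:
+ #       type: RuntimeDefault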
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
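+ # --- Editor's illustrative note (not part of the generated schema) ---
+ # A startupProbe sketch using the fields above: it gives a slow-starting
+ # container up to 30 * 10 = 300 seconds to come up before other probes run;
+ # the path and port name are assumptions:
+ #
+ #   startupProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: http
+ #     failureThreshold: 30
+ #     periodSeconds: 10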
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ terminationGracePeriodSeconds:
+ description: Termination grace period seconds for the pod
+ format: int64
+ type: integer
+ tolerations:
+ description: Tolerations specifies the tolerations listed
+ in ".spec.tolerations" to be applied to the pod.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ description: VolumeMounts specifies the volumes listed in
+ ".spec.volumes" to mount into the main container's filesystem.
+ items:
+ description: VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ type: object
+ failureRetries:
+ description: |-
+ FailureRetries is the number of times to retry a failed application before giving up.
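+ # --- Editor's illustrative note (not part of the generated schema) ---
+ # Pod-level tolerations and volumeMounts as documented above; a minimal sketch
+ # in which the taint key, volume name, and mount path are assumptions:
+ #
+ #   spec:
+ #     driver:
+ #       tolerations:
+ #         - key: dedicated          # hypothetical taint key
+ #           operator: Equal
+ #           value: spark
+ #           effect: NoSchedule
+ #       volumeMounts:
+ #         - name: spark-data        # must match an entry in .spec.volumes
+ #           mountPath: /mnt/spark-data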
+ This is best effort and actual retry attempts can be >= the value specified.
+ format: int32
+ type: integer
+ hadoopConf:
+ additionalProperties:
+ type: string
+ description: |-
+ HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option
+ in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop
+ configuration properties.
+ type: object
+ hadoopConfigMap:
+ description: |-
+ HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml.
+ The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to.
+ type: string
+ image:
+ description: |-
+ Image is the container image for the driver, executor, and init-container. Any custom container images for the
+ driver, executor, or init-container takes precedence over this.
+ type: string
+ imagePullPolicy:
+ description: ImagePullPolicy is the image pull policy for the
+ driver, executor, and init-container.
+ type: string
+ imagePullSecrets:
+ description: ImagePullSecrets is the list of image-pull secrets.
+ items:
+ type: string
+ type: array
+ mainApplicationFile:
+ description: MainFile is the path to a bundled JAR, Python, or
+ R file of the application.
+ type: string
+ mainClass:
+ description: |-
+ MainClass is the fully-qualified main class of the Spark application.
+ This only applies to Java/Scala Spark applications.
+ type: string
+ memoryOverheadFactor:
+ description: |-
+ This sets the Memory Overhead Factor that will allocate memory to non-JVM memory.
+ For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will
+ be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set.
+ type: string
+ mode:
+ description: Mode is the deployment mode of the Spark application.
+ enum:
+ - cluster
+ - client
+ type: string
+ monitoring:
+ description: Monitoring configures how monitoring is handled.
+ properties:
+ exposeDriverMetrics:
+ description: ExposeDriverMetrics specifies whether to expose
+ metrics on the driver.
+ type: boolean
+ exposeExecutorMetrics:
+ description: ExposeExecutorMetrics specifies whether to expose
+ metrics on the executors.
+ type: boolean
+ metricsProperties:
+ description: |-
+ MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system.
+ If not specified, the content in spark-docker/conf/metrics.properties will be used.
+ type: string
+ metricsPropertiesFile:
+ description: |-
+ MetricsPropertiesFile is the container local path of file metrics.properties for configuring
+ the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used.
+ type: string
+ prometheus:
+ description: Prometheus is for configuring the Prometheus
+ JMX exporter.
+ properties:
+ configFile:
+ description: |-
+ ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image.
+ ConfigFile takes precedence over Configuration, which is shown below.
+ type: string
+ configuration:
+ description: |-
+ Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter.
+ If not specified, the content in spark-docker/conf/prometheus.yaml will be used.
+ Configuration has no effect if ConfigFile is set.
+ type: string
+ jmxExporterJar:
+ description: JmxExporterJar is the path to the Prometheus
+ JMX exporter jar in the container.
+ type: string
+ port:
+ description: |-
+ Port is the port of the HTTP server run by the Prometheus JMX exporter.
+ If not specified, 8090 will be used as the default.
+ format: int32
+ maximum: 49151
+ minimum: 1024
+ type: integer
+ portName:
+ description: |-
+ PortName is the port name of prometheus JMX exporter port.
+ If not specified, jmx-exporter will be used as the default.
+ type: string
+ required:
+ - jmxExporterJar
+ type: object
+ required:
+ - exposeDriverMetrics
+ - exposeExecutorMetrics
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is the Kubernetes node selector to be added to the driver and executor pods.
+ This field is mutually exclusive with nodeSelector at podSpec level (driver or executor).
+ This field will be deprecated in future versions (at SparkApplicationSpec level).
+ type: object
+ proxyUser:
+ description: |-
+ ProxyUser specifies the user to impersonate when submitting the application.
+ It maps to the command-line flag "--proxy-user" in spark-submit.
+ type: string
+ pythonVersion:
+ description: |-
+ This sets the major Python version of the docker
+ image used to run the driver and executor containers. Can either be 2 or 3, default 2.
+ enum:
+ - "2"
+ - "3"
+ type: string
+ restartPolicy:
+ description: RestartPolicy defines the policy on if and in which
+ conditions the controller should restart an application.
+ properties:
+ onFailureRetries:
+ description: OnFailureRetries is the number of times to retry
+ running an application before giving up.
+ format: int32
+ minimum: 0
+ type: integer
+ onFailureRetryInterval:
+ description: OnFailureRetryInterval is the interval in seconds
+ between retries on failed runs.
+ format: int64
+ minimum: 1
+ type: integer
+ onSubmissionFailureRetries:
+ description: |-
+ OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up.
+ This is best effort and actual retry attempts can be >= the value specified due to caching.
+ These are required if RestartPolicy is OnFailure.
+ format: int32
+ minimum: 0
+ type: integer
+ onSubmissionFailureRetryInterval:
+ description: OnSubmissionFailureRetryInterval is the interval
+ in seconds between retries on failed submissions.
+ format: int64
+ minimum: 1
+ type: integer
+ type:
+ description: Type specifies the RestartPolicyType.
+ enum:
+ - Never
+ - Always
+ - OnFailure
+ type: string
+ type: object
+ retryInterval:
+ description: RetryInterval is the unit of intervals in seconds
+ between submission retries.
+ format: int64
+ type: integer
+ sparkConf:
+ additionalProperties:
+ type: string
+ description: |-
+ SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
+ spark-submit.
+ type: object
+ sparkConfigMap:
+ description: |-
+ SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties.
+ The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to.
+ type: string
+ sparkUIOptions:
+ description: SparkUIOptions allows configuring the Service and
+ the Ingress to expose the sparkUI
+ properties:
+ ingressAnnotations:
+ additionalProperties:
+ type: string
+ description: IngressAnnotations is a map of key,value pairs
+ of annotations that might be added to the ingress object.
+ e.g.
specify nginx as ingress.class + type: object + ingressTLS: + description: TlsHosts is useful If we need to declare SSL + certificates to the ingress object + items: + description: IngressTLS describes the transport layer security + associated with an ingress. + properties: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs + of annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of + labels that might be added to the service object. + type: object + servicePort: + description: |- + ServicePort allows configuring the port at service level that might be different from the targetPort. + TargetPort should be the same as the one defined in spark.ui.port + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP. + Defaults to spark-driver-ui-port. + type: string + serviceType: + description: ServiceType allows configuring the type of the + service. Defaults to ClusterIP. + type: string + type: object + sparkVersion: + description: SparkVersion is the version of Spark the application + uses. + type: string + timeToLiveSeconds: + description: |- + TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication + after its termination. + The SparkApplication object will be garbage collected if the current time is more than the + TimeToLiveSeconds since its termination. + format: int64 + type: integer + type: + description: Type tells the type of the Spark application. + enum: + - Java + - Python + - Scala + - R + type: string + volumes: + description: Volumes is the list of Kubernetes volumes that can + be mounted by the driver and/or executors. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
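+ # --- Editor's illustrative note (not part of the generated schema) ---
+ # A sketch combining the application-level restartPolicy, monitoring, and
+ # sparkUIOptions fields documented above; the exporter jar path and the
+ # annotation value are assumptions:
+ #
+ #   spec:
+ #     restartPolicy:
+ #       type: OnFailure
+ #       onFailureRetries: 3
+ #       onFailureRetryInterval: 10
+ #       onSubmissionFailureRetries: 5
+ #       onSubmissionFailureRetryInterval: 20
+ #     monitoring:
+ #       exposeDriverMetrics: true
+ #       exposeExecutorMetrics: true
+ #       prometheus:
+ #         jmxExporterJar: /prometheus/jmx_prometheus_javaagent.jar  # assumed path in the image
+ #         port: 8090
+ #     sparkUIOptions:
+ #       servicePort: 4040
+ #       ingressAnnotations:
+ #         kubernetes.io/ingress.class: nginx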
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure managed + data disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about the
+ pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
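+ # --- Editor's illustrative note (not part of the generated schema) ---
+ # An emptyDir volume as documented above, backed by memory and capped at 1Gi;
+ # the volume name is an assumption:
+ #
+ #   volumes:
+ #     - name: scratch         # hypothetical volume name
+ #       emptyDir:
+ #         medium: Memory
+ #         sizeLimit: 1Gi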
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource that
+ is attached to a kubelet's host machine and then exposed
+ to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target worldwide
+ names (WWNs)'
+ items:
+ type: string
+ type: array
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to use
+ for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
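+ # --- Editor's illustrative note (not part of the generated schema) ---
+ # A generic ephemeral volume using the volumeClaimTemplate documented above;
+ # the volume name and storage class are assumptions:
+ #
+ #   volumes:
+ #     - name: scratch-pvc               # hypothetical volume name
+ #       ephemeral:
+ #         volumeClaimTemplate:
+ #           spec:
+ #             accessModes: ["ReadWriteOnce"]
+ #             storageClassName: standard  # assumed storage class
+ #             resources:
+ #               requests:
+ #                 storage: 10Gi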
+ type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the host
+ that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references an already
+ created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is the name of the authentication secret for RBDUser. If provided,
+ it overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. 
+ properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - driver + - executor + - sparkVersion + - type + type: object + required: + - schedule + - template + type: object + status: + description: ScheduledSparkApplicationStatus defines the observed state + of ScheduledSparkApplication. + properties: + lastRun: + description: LastRun is the time when the last run of the application + started. + format: date-time + nullable: true + type: string + lastRunName: + description: LastRunName is the name of the SparkApplication for the + most recent run of the application. + type: string + nextRun: + description: NextRun is the time when the next run of the application + will start. + format: date-time + nullable: true + type: string + pastFailedRunNames: + description: PastFailedRunNames keeps the names of SparkApplications + for past failed runs. + items: + type: string + type: array + pastSuccessfulRunNames: + description: PastSuccessfulRunNames keeps the names of SparkApplications + for past successful runs. + items: + type: string + type: array + reason: + description: Reason tells why the ScheduledSparkApplication is in + the particular ScheduleState. + type: string + scheduleState: + description: ScheduleState is the current scheduling state of the + application. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml b/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml new file mode 100644 index 000000000..afc07c253 --- /dev/null +++ b/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml @@ -0,0 +1,11548 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 + controller-gen.kubebuilder.io/version: v0.15.0 + name: sparkapplications.sparkoperator.k8s.io +spec: + group: sparkoperator.k8s.io + names: + kind: SparkApplication + listKind: SparkApplicationList + plural: sparkapplications + shortNames: + - sparkapp + singular: sparkapplication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.applicationState.state + name: Status + type: string + - jsonPath: .status.executionAttempts + name: Attempts + type: string + - jsonPath: .status.lastSubmissionAttemptTime + name: Start + type: string + - jsonPath: .status.terminationTime + name: Finish + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SparkApplication is the Schema for the sparkapplications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ SparkApplicationSpec defines the desired state of SparkApplication.
+ It carries every piece of information a spark-submit command takes and recognizes.
+ properties:
+ arguments:
+ description: Arguments is a list of arguments to be passed to the
+ application.
+ items:
+ type: string
+ type: array
+ batchScheduler:
+ description: BatchScheduler configures which batch scheduler will
+ be used for scheduling.
+ type: string
+ batchSchedulerOptions:
+ description: BatchSchedulerOptions provides fine-grained control over
+ how batch scheduling is performed.
+ properties:
+ priorityClassName:
+ description: PriorityClassName stands for the name of the k8s PriorityClass
+ resource; it is used by the Volcano batch scheduler.
+ type: string
+ queue:
+ description: Queue stands for the resource queue to which the application
+ belongs; it is used by the Volcano batch scheduler.
+ type: string
+ resources:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Resources specifies the custom resource list to request. Usually it is used to define the lower-bound limit.
+ If specified, the Volcano scheduler will consider it as the resources requested.
+ type: object
+ type: object
+ deps:
+ description: Deps captures all possible types of dependencies of a
+ Spark application.
+ properties:
+ excludePackages:
+ description: |-
+ ExcludePackages is a list of "groupId:artifactId" entries to exclude while resolving the
+ dependencies provided in Packages to avoid dependency conflicts.
+ items:
+ type: string
+ type: array
+ files:
+ description: Files is a list of files the Spark application depends
+ on.
+ items:
+ type: string
+ type: array
+ jars:
+ description: Jars is a list of JAR files the Spark application
+ depends on.
+ items:
+ type: string
+ type: array
+ packages:
+ description: |-
+ Packages is a list of Maven coordinates of jars to include on the driver and executor
+ classpaths. This will search the local Maven repo, then Maven Central and any additional
+ remote repositories given by the "repositories" option.
+ Each package should be of the form "groupId:artifactId:version".
+ items:
+ type: string
+ type: array
+ pyFiles:
+ description: PyFiles is a list of Python files the Spark application
+ depends on.
+ items:
+ type: string
+ type: array
+ repositories:
+ description: |-
+ Repositories is a list of additional remote repositories to search for the Maven coordinates
+ given with the "packages" option.
+ items:
+ type: string
+ type: array
+ type: object
+ driver:
+ description: Driver is the driver specification.
+ properties:
+ affinity:
+ description: Affinity specifies the affinity/anti-affinity settings
+ for the pod.
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for
+ the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. 
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to be + added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to which + the named objects should be mounted to. + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the driver. + Maps to `spark.kubernetes.driver.request.cores` that is available since Spark 3.0. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + dnsConfig: + description: DnsConfig dns settings for the pod, following the + Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add to the + pod. 
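The affinity stanza above is the upstream Kubernetes PodAffinity/PodAntiAffinity schema embedded in the CRD; together with the sibling driver fields just defined (annotations, configMaps, coreLimit, coreRequest, cores, dnsConfig) it surfaces to users roughly as below. A minimal sketch, assuming a hypothetical application name, image, main application file, ConfigMap, and annotation key; values are illustrative only:

    apiVersion: sparkoperator.k8s.io/v1beta2
    kind: SparkApplication
    metadata:
      name: spark-pi                              # hypothetical application name
    spec:
      type: Scala
      mode: cluster
      sparkVersion: "3.5.0"
      image: spark:3.5.0                          # hypothetical image
      mainClass: org.apache.spark.examples.SparkPi
      mainApplicationFile: local:///opt/spark/examples/jars/spark-examples.jar  # hypothetical path
      driver:
        cores: 1                                  # spark.driver.cores; schema minimum is 1
        coreRequest: "500m"                       # spark.kubernetes.driver.request.cores (Spark 3.0+)
        coreLimit: "1200m"                        # hard CPU limit on the driver pod
        annotations:
          example.com/team: data-platform         # hypothetical annotation key
        configMaps:
          - name: spark-extra-conf                # hypothetical ConfigMap, mounted at path
            path: /etc/spark/extra
        affinity:
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 100                       # must be in the range 1-100
                podAffinityTerm:
                  topologyKey: kubernetes.io/hostname  # required by the schema
                  labelSelector:
                    matchLabels:
                      spark-role: driver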
+ items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. 
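The env field is the standard Kubernetes EnvVar list, so literal values and valueFrom references (ConfigMap key, Secret key, pod field, resource field) all apply to the driver pod. A sketch under the same hypothetical manifest; the ConfigMap and Secret names are invented:

    spec:
      driver:
        env:
          - name: LOG_LEVEL                 # literal value
            value: INFO
          - name: REGION                    # read from a ConfigMap key
            valueFrom:
              configMapKeyRef:
                name: cluster-info          # hypothetical ConfigMap
                key: region
          - name: DB_PASSWORD               # read from a Secret key
            valueFrom:
              secretKeyRef:
                name: db-credentials        # hypothetical Secret
                key: password
                optional: false             # fail the pod if the key is missing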
+ items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request for + driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the Kubernetes + specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host networking + for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides Spec.Image + if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. 
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
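Because initContainers embeds the full Kubernetes Container schema, an init container on the driver is declared exactly as it would be in a plain pod spec. A minimal sketch; the container name, image, command, and service name are hypothetical:

    spec:
      driver:
        initContainers:
          - name: wait-for-deps                    # DNS_LABEL, unique within the pod
            image: busybox:1.36                    # hypothetical init image
            imagePullPolicy: IfNotPresent
            command: ["sh", "-c"]                  # exec sh explicitly to get shell semantics
            args: ["until nslookup my-metastore; do sleep 2; done"]  # hypothetical service name
            env:
              - name: TIMEOUT_SECONDS
                value: "60"                        # EnvVar values are strings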
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
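The postStart/preStop handlers above accept exec, httpGet, or sleep actions; tcpSocket is kept only for backward compatibility and fails at runtime, per its description. A sketch of a sidecar-style init container using both hooks; the name, log path, and commands are hypothetical, and the sleep action assumes a cluster where PodLifecycleSleepAction is available:

    spec:
      driver:
        initContainers:
          - name: log-forwarder                    # hypothetical sidecar-style container
            image: busybox:1.36
            restartPolicy: Always                  # "sidecar" semantics per the restartPolicy field below
            command: ["sh", "-c", "tail -F /var/log/app.log"]       # hypothetical log path
            lifecycle:
              postStart:
                sleep:
                  seconds: 1                       # Sleep action; assumes PodLifecycleSleepAction support
              preStop:
                exec:
                  command: ["sh", "-c", "echo flushing buffers"]    # hypothetical cleanup command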
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
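The container securityContext here is likewise the upstream schema. A hardened-container sketch with illustrative values; the container name and image are hypothetical:

    spec:
      driver:
        initContainers:
          - name: setup                            # hypothetical
            image: busybox:1.36
            command: ["true"]
            securityContext:
              runAsNonRoot: true                   # kubelet refuses to start the container as UID 0
              runAsUser: 1000                      # overrides any PodSecurityContext value
              allowPrivilegeEscalation: false      # sets no_new_privs on the process
              readOnlyRootFilesystem: true
              capabilities:
                drop: ["ALL"]                      # drop all POSIX capabilities
              seccompProfile:
                type: RuntimeDefault               # container runtime's default profile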
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
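Probes follow the standard Kubernetes probe schema; a startupProbe suits containers that load data or warm a cache before reaching steady state, and probes on init containers are only accepted for sidecar-style containers (restartPolicy: Always). A sketch with a hypothetical image, port, and path:

    spec:
      driver:
        initContainers:
          - name: cache-warmer                     # hypothetical sidecar-style container
            image: example/cache-warmer:1.0        # hypothetical image
            restartPolicy: Always                  # probes are only valid on restartable init containers
            startupProbe:
              httpGet:
                path: /healthz                     # hypothetical endpoint
                port: 8080
              failureThreshold: 30                 # with periodSeconds, allows ~5 minutes to start
              periodSeconds: 10
            livenessProbe:
              tcpSocket:
                port: 8080
              initialDelaySeconds: 5
              timeoutSeconds: 1                    # the documented default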
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. 
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the driver. For instance, + GC settings or other logging. + type: string + kubernetesMaster: + description: |- + KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and + other Kubernetes resources. Default to https://kubernetes.default.svc. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added to the + pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. 
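+ # --- Illustrative sketch (editorial comment; not part of the generated schema) ---
+ # How the driver-level fields documented above (javaOptions, kubernetesMaster,
+ # labels) might look in a SparkApplication manifest, assuming they sit under
+ # .spec.driver as in the v1beta2 API. The JVM flags and label values are
+ # hypothetical.
+ #
+ #   driver:
+ #     javaOptions: "-XX:+UseG1GC -verbose:gc"
+ #     kubernetesMaster: https://kubernetes.default.svc
+ #     labels:
+ #       team: data-platform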
+ properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' 
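+ # --- Illustrative sketch (editorial comment; not part of the generated schema) ---
+ # A driver lifecycle block combining the postStart and preStop handlers
+ # described above. The command and sleep duration are hypothetical; note the
+ # descriptions mark tcpSocket as deprecated for lifecycle hooks.
+ #
+ #   driver:
+ #     lifecycle:
+ #       postStart:
+ #         exec:
+ #           command: ["/bin/sh", "-c", "echo started > /tmp/ready"]
+ #       preStop:
+ #         sleep:
+ #           seconds: 10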
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for the + pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory to + allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podName: + description: |- + PodName is the name of the driver pod that the user creates. This is used for the + in-cluster client mode in which the user creates a client pod where the driver of + the user application runs. It's an error to set this field if Mode is not + in-cluster-client. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
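+ # --- Illustrative sketch (editorial comment; not part of the generated schema) ---
+ # A pod-level security context using the fields documented above; the numeric
+ # IDs are hypothetical (185 is merely a common UID in Spark images).
+ #
+ #   driver:
+ #     podSecurityContext:
+ #       runAsUser: 185
+ #       runAsNonRoot: true
+ #       fsGroup: 185
+ #       seccompProfile:
+ #         type: RuntimeDefault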
+ properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the pods + objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will be + used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add to + the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
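+ # --- Illustrative sketch (editorial comment; not part of the generated schema) ---
+ # A container-level securityContext built from the fields above. Per the
+ # descriptions, values set here take precedence over their pod-level
+ # counterparts when both are specified.
+ #
+ #   driver:
+ #     securityContext:
+ #       allowPrivilegeEscalation: false
+ #       readOnlyRootFilesystem: true
+ #       runAsNonRoot: true
+ #       capabilities:
+ #         drop: ["ALL"]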
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: |- + ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + serviceLabels: + additionalProperties: + type: string + description: |- + ServiceLabels defines the labels to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that run + alongside the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
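+ # --- Illustrative sketch (editorial comment; not part of the generated schema) ---
+ # Environment variables on a driver sidecar using the three value sources
+ # documented above (literal value, configMapKeyRef, fieldRef). The container,
+ # image, and ConfigMap names are hypothetical.
+ #
+ #   sidecars:
+ #     - name: log-forwarder
+ #       image: ghcr.io/example/log-forwarder:1.0
+ #       env:
+ #         - name: LOG_LEVEL
+ #           value: "info"
+ #         - name: APP_CONFIG
+ #           valueFrom:
+ #             configMapKeyRef:
+ #               name: app-config
+ #               key: config.yaml
+ #         - name: POD_NAME
+ #           valueFrom:
+ #             fieldRef:
+ #               fieldPath: metadata.name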
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
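+ # --- Illustrative sketch (editorial comment; not part of the generated schema) ---
+ # Bulk-importing variables with envFrom, together with the image fields
+ # above; all names are hypothetical. As the description notes, when a key
+ # appears in multiple sources the last source wins, and explicit env entries
+ # override envFrom.
+ #
+ #   envFrom:
+ #     - configMapRef:
+ #         name: common-env
+ #     - prefix: SECRET_
+ #       secretRef:
+ #         name: common-secrets
+ #   image: ghcr.io/example/log-forwarder:1.0
+ #   imagePullPolicy: IfNotPresent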
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
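+ # --- Illustrative sketch (editorial comment; not part of the generated schema) ---
+ # A livenessProbe combining the timing fields above with the gRPC handler;
+ # the port and service name are hypothetical. With these values the kubelet
+ # tolerates roughly initialDelaySeconds + failureThreshold * periodSeconds
+ # (30 + 3 * 10 = 60 seconds) before the first restart.
+ #
+ #   livenessProbe:
+ #     grpc:
+ #       port: 8090
+ #       service: liveness
+ #     initialDelaySeconds: 30
+ #     periodSeconds: 10
+ #     failureThreshold: 3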
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
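+ # --- Illustrative sketch (editorial comment; not part of the generated schema) ---
+ # Requests and limits for a sidecar container using the resources fields
+ # documented above; the quantities are hypothetical. Per its description,
+ # restartPolicy "Always" is only valid on init containers (where it yields
+ # sidecar-style behavior), so it is omitted here.
+ #
+ #   resources:
+ #     requests:
+ #       cpu: 100m
+ #       memory: 128Mi
+ #     limits:
+ #       memory: 256Mi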
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
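+ # Illustrative usage (comment only, not part of the generated schema): a
+ # startupProbe using the httpGet action described above; the endpoint and
+ # port are hypothetical.
+ #   startupProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     failureThreshold: 30
+ #     periodSeconds: 10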
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to,
+ defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions.
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicates how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a raw
+ block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of the
+ container that the device will be mapped to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ terminationGracePeriodSeconds:
+ description: Termination grace period seconds for the pod
+ format: int64
+ type: integer
+ tolerations:
+ description: Tolerations specifies the tolerations listed in ".spec.tolerations"
+ to be applied to the pod.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ description: VolumeMounts specifies the volumes listed in ".spec.volumes"
+ to mount into the main container's filesystem.
+ items:
+ description: VolumeMount describes a mounting of a Volume within
+ a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
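+ # Illustrative usage (comment only, not part of the generated schema):
+ # letting driver pods tolerate a dedicated-node taint via the tolerations
+ # field described above; the taint key and value are hypothetical.
+ #   spec:
+ #     driver:
+ #       tolerations:
+ #       - key: dedicated
+ #         operator: Equal
+ #         value: spark
+ #         effect: NoSchedule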
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ type: object
+ driverIngressOptions:
+ description: DriverIngressOptions allows configuring the Service and
+ the Ingress to expose ports inside the Spark driver
+ items:
+ description: DriverIngressConfiguration is for driver ingress specific
+ configuration parameters.
+ properties:
+ ingressAnnotations:
+ additionalProperties:
+ type: string
+ description: IngressAnnotations is a map of key,value pairs
+ of annotations that might be added to the ingress object.
+ e.g. specify nginx as ingress.class
+ type: object
+ ingressTLS:
+ description: IngressTLS is useful if we need to declare SSL certificates
+ for the ingress object
+ items:
+ description: IngressTLS describes the transport layer security
+ associated with an ingress.
+ properties:
+ hosts:
+ description: |-
+ hosts is a list of hosts included in the TLS certificate. The values in
+ this list must match the name/s used in the tlsSecret. Defaults to the
+ wildcard host setting for the loadbalancer controller fulfilling this
+ Ingress, if left unspecified.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ secretName:
+ description: |-
+ secretName is the name of the secret used to terminate TLS traffic on
+ port 443. Field is left optional to allow TLS routing based on SNI
+ hostname alone. If the SNI host in a listener conflicts with the "Host"
+ header field used by an IngressRule, the SNI host is used for termination
+ and value of the "Host" header is used for routing.
+ type: string
+ type: object
+ type: array
+ ingressURLFormat:
+ description: IngressURLFormat is the URL for the ingress.
+ type: string
+ serviceAnnotations:
+ additionalProperties:
+ type: string
+ description: ServiceAnnotations is a map of key,value pairs
+ of annotations that might be added to the service object.
+ type: object
+ serviceLabels:
+ additionalProperties:
+ type: string
+ description: ServiceLabels is a map of key,value pairs of labels
+ that might be added to the service object.
+ type: object
+ servicePort:
+ description: ServicePort allows configuring the port at service
+ level that might be different from the targetPort.
+ format: int32
+ type: integer
+ servicePortName:
+ description: |-
+ ServicePortName allows configuring the name of the service port.
+ This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP.
+ type: string
+ serviceType:
+ description: ServiceType allows configuring the type of the
+ service. Defaults to ClusterIP.
+ type: string
+ required:
+ - servicePort
+ - servicePortName
+ type: object
+ type: array
+ dynamicAllocation:
+ description: |-
+ DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes
+ scheduler backend since Spark 3.0.
+ properties:
+ enabled:
+ description: Enabled controls whether dynamic allocation is enabled
+ or not.
+ type: boolean
+ initialExecutors:
+ description: |-
+ InitialExecutors is the initial number of executors to request. If .spec.executor.instances
+ is also set, the initial number of executors is set to the bigger of that and this option.
+ format: int32
+ type: integer
+ maxExecutors:
+ description: MaxExecutors is the upper bound for the number of
+ executors if dynamic allocation is enabled.
+ format: int32
+ type: integer
+ minExecutors:
+ description: MinExecutors is the lower bound for the number of
+ executors if dynamic allocation is enabled.
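+ # Illustrative usage (comment only, not part of the generated schema):
+ # exposing an extra driver port through driverIngressOptions and enabling
+ # dynamic allocation. All values, including the hostname, are hypothetical
+ # examples rather than defaults.
+ #   spec:
+ #     driverIngressOptions:
+ #     - servicePort: 4040
+ #       servicePortName: spark-ui
+ #       serviceType: ClusterIP
+ #       ingressURLFormat: spark-ui.example.com
+ #     dynamicAllocation:
+ #       enabled: true
+ #       initialExecutors: 2
+ #       minExecutors: 1
+ #       maxExecutors: 10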
+ format: int32
+ type: integer
+ shuffleTrackingTimeout:
+ description: |-
+ ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding
+ shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled).
+ format: int64
+ type: integer
+ type: object
+ executor:
+ description: Executor is the executor specification.
+ properties:
+ affinity:
+ description: Affinity specifies the affinity/anti-affinity settings
+ for the pod.
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for
+ the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with
+ the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
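+ # Illustrative usage (comment only, not part of the generated schema): a
+ # preferred node-affinity term for executors; the label key and values are
+ # hypothetical.
+ #   spec:
+ #     executor:
+ #       affinity:
+ #         nodeAffinity:
+ #           preferredDuringSchedulingIgnoredDuringExecution:
+ #           - weight: 50
+ #             preference:
+ #               matchExpressions:
+ #               - key: node.example.com/spark-pool
+ #                 operator: In
+ #                 values: ["executors"]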
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the
+ corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
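+ # Illustrative usage (comment only, not part of the generated schema): a
+ # hard requirement expressed with requiredDuringSchedulingIgnoredDuringExecution;
+ # kubernetes.io/arch is a well-known node label, the value is an example.
+ #   affinity:
+ #     nodeAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         nodeSelectorTerms:
+ #         - matchExpressions:
+ #           - key: kubernetes.io/arch
+ #             operator: In
+ #             values: ["amd64"]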
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
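+ # Illustrative usage (comment only, not part of the generated schema): a
+ # weighted pod-affinity term that prefers co-locating executors with their
+ # driver; the label values are hypothetical.
+ #   podAffinity:
+ #     preferredDuringSchedulingIgnoredDuringExecution:
+ #     - weight: 100
+ #       podAffinityTerm:
+ #         topologyKey: kubernetes.io/hostname
+ #         labelSelector:
+ #           matchLabels:
+ #             spark-role: driver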
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). 
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
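+ # Illustrative usage (comment only, not part of the generated schema):
+ # spreading executors apart with a weighted pod-anti-affinity term; the
+ # selector is hypothetical.
+ #   podAntiAffinity:
+ #     preferredDuringSchedulingIgnoredDuringExecution:
+ #     - weight: 100
+ #       podAffinityTerm:
+ #         topologyKey: kubernetes.io/hostname
+ #         labelSelector:
+ #           matchLabels:
+ #             spark-role: executor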
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to be + added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. 
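+ # Illustrative usage (comment only, not part of the generated schema):
+ # mounting an existing ConfigMap into executor pods via the configMaps
+ # (NamePath) field described above; the name and path are hypothetical.
+ #   spec:
+ #     executor:
+ #       configMaps:
+ #       - name: spark-extra-config
+ #         path: /etc/spark/extra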
+ items:
+ description: NamePath is a pair of a name and a path to which
+ the named objects should be mounted.
+ properties:
+ name:
+ type: string
+ path:
+ type: string
+ required:
+ - name
+ - path
+ type: object
+ type: array
+ coreLimit:
+ description: |-
+ CoreLimit specifies a hard limit on CPU cores for the pod.
+ Optional
+ type: string
+ coreRequest:
+ description: |-
+ CoreRequest is the physical CPU core request for the executors.
+ Maps to `spark.kubernetes.executor.request.cores` that is available since Spark 2.4.
+ type: string
+ cores:
+ description: Cores maps to `spark.driver.cores` or `spark.executor.cores`
+ for the driver and executors, respectively.
+ format: int32
+ minimum: 1
+ type: integer
+ deleteOnTermination:
+ description: |-
+ DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination.
+ Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0.
+ type: boolean
+ dnsConfig:
+ description: DNSConfig specifies the DNS settings for the pod, following
+ the Kubernetes specifications.
+ properties:
+ nameservers:
+ description: |-
+ A list of DNS name server IP addresses.
+ This will be appended to the base nameservers generated from DNSPolicy.
+ Duplicated nameservers will be removed.
+ items:
+ type: string
+ type: array
+ options:
+ description: |-
+ A list of DNS resolver options.
+ This will be merged with the base options generated from DNSPolicy.
+ Duplicated entries will be removed. Resolution options given in Options
+ will override those that appear in the base DNSPolicy.
+ items:
+ description: PodDNSConfigOption defines DNS resolver options
+ of a pod.
+ properties:
+ name:
+ description: Required.
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ description: |-
+ A list of DNS search domains for host-name lookup.
+ This will be appended to the base search paths generated from DNSPolicy.
+ Duplicated search paths will be removed.
+ items:
+ type: string
+ type: array
+ type: object
+ env:
+ description: Env carries the environment variables to add to the
+ pod.
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
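+ # Illustrative usage (comment only, not part of the generated schema): an
+ # executor environment variable sourced from a ConfigMap key via valueFrom;
+ # the ConfigMap name and key are hypothetical.
+ #   env:
+ #   - name: LOG_LEVEL
+ #     valueFrom:
+ #       configMapKeyRef:
+ #         name: spark-env
+ #         key: log-level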
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. 
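+ # Illustrative usage (comment only, not part of the generated schema):
+ # bulk-importing environment variables with envFrom instead of the
+ # deprecated envSecretKeyRefs field below; the names are hypothetical.
+ #   envFrom:
+ #   - configMapRef:
+ #       name: spark-env
+ #   - secretRef:
+ #       name: spark-secrets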
+ properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request for + driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the Kubernetes + specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host networking + for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides Spec.Image + if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. 
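+                            # Editor's note: illustrative sketch only. The `gpu` and
+                            # `hostAliases` fields above could be used as follows; the
+                            # resource name and addresses are hypothetical.
+                            #   gpu:
+                            #     name: nvidia.com/gpu
+                            #     quantity: 1
+                            #   hostAliases:
+                            #   - ip: "10.0.0.10"
+                            #     hostnames:
+                            #     - metastore.internal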
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. 
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
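+                                    # Editor's note: illustrative sketch only. A readiness
+                                    # probe on an init/sidecar container follows the usual
+                                    # core/v1 shape; the path and port are hypothetical.
+                                    #   readinessProbe:
+                                    #     httpGet:
+                                    #       path: /healthz
+                                    #       port: 8080
+                                    #     periodSeconds: 10
+                                    #     failureThreshold: 3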
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
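+                          # Editor's note: illustrative sketch only. The seccompProfile
+                          # described above needs no localhostProfile when the runtime
+                          # default is used:
+                          #   seccompProfile:
+                          #     type: RuntimeDefault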
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
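+                              # Editor's note: illustrative sketch only. A startup probe
+                              # that tolerates a slow warm-up (up to 30 x 10s) before
+                              # liveness checks begin; the endpoint is hypothetical.
+                              #   startupProbe:
+                              #     httpGet:
+                              #       path: /started
+                              #       port: 8080
+                              #     failureThreshold: 30
+                              #     periodSeconds: 10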
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. 
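+                        # Editor's note: illustrative sketch only. `volumeDevices` maps a
+                        # raw block PVC into the container; the claim name and device
+                        # path are hypothetical.
+                        #   volumeDevices:
+                        #   - name: raw-data-pvc
+                        #     devicePath: /dev/xvda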
+ properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + instances: + description: Instances is the number of executor instances. + format: int32 + minimum: 1 + type: integer + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the executors. For instance, + GC settings or other logging. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added to the + pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
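+                        # Editor's note: illustrative sketch only. The executor-level
+                        # fields above (instances, javaOptions, labels, lifecycle)
+                        # combined in one spec; all values are hypothetical.
+                        #   instances: 4
+                        #   javaOptions: "-XX:+UseG1GC"
+                        #   labels:
+                        #     team: data-platform
+                        #   lifecycle:
+                        #     preStop:
+                        #       exec:
+                        #         command: ["/bin/sh", "-c", "sleep 5"]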
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for the + pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory to + allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. 
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. 
If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the pods + objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will be + used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add to + the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
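+                      # Illustrative values only, not emitted by controller-gen: a
+                      # driver/executor container securityContext using these fields
+                      # might look like (hypothetical labels):
+                      #   securityContext:
+                      #     runAsNonRoot: true
+                      #     seLinuxOptions:
+                      #       level: "s0:c123,c456"
+                      #       role: "system_r"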
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that run + along side the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
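+                              # Illustrative only, not emitted by controller-gen: a sidecar
+                              # exposing its own memory limit through resourceFieldRef
+                              # (hypothetical names) might look like:
+                              #   env:
+                              #   - name: MEM_LIMIT
+                              #     valueFrom:
+                              #       resourceFieldRef:
+                              #         containerName: sidecar
+                              #         resource: limits.memory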
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
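+                    # Illustrative only, not emitted by controller-gen: a sidecar
+                    # lifecycle hook using this schema (hypothetical command) might
+                    # look like:
+                    #   lifecycle:
+                    #     postStart:
+                    #       exec:
+                    #         command: ["/bin/sh", "-c", "echo started"]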
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
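+                    # Illustrative only, not emitted by controller-gen: a sidecar
+                    # livenessProbe using this schema (hypothetical endpoint) might
+                    # look like:
+                    #   livenessProbe:
+                    #     httpGet:
+                    #       path: /healthz
+                    #       port: 8080
+                    #     initialDelaySeconds: 10
+                    #     periodSeconds: 30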
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
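+                    # Illustrative only, not emitted by controller-gen: the equivalent
+                    # TCP-based check (hypothetical port) might look like:
+                    #   livenessProbe:
+                    #     tcpSocket:
+                    #       port: 8080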
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
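+                    # Illustrative only, not emitted by controller-gen: a sidecar
+                    # startupProbe with a custom header (hypothetical values) might
+                    # look like:
+                    #   startupProbe:
+                    #     httpGet:
+                    #       path: /ready
+                    #       port: 8080
+                    #       httpHeaders:
+                    #       - name: X-Probe
+                    #         value: startup
+                    #     failureThreshold: 30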
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. 
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed in ".spec.tolerations" + to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in ".spec.volumes" + to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + failureRetries: + description: |- + FailureRetries is the number of times to retry a failed application before giving up. 
+ This is best effort and actual retry attempts can be >= the value specified. + format: int32 + type: integer + hadoopConf: + additionalProperties: + type: string + description: |- + HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option + in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop + configuration properties. + type: object + hadoopConfigMap: + description: |- + HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. + The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + image: + description: |- + Image is the container image for the driver, executor, and init-container. Any custom container images for the + driver, executor, or init-container take precedence over this. + type: string + imagePullPolicy: + description: ImagePullPolicy is the image pull policy for the driver, + executor, and init-container. + type: string + imagePullSecrets: + description: ImagePullSecrets is the list of image-pull secrets. + items: + type: string + type: array + mainApplicationFile: + description: MainApplicationFile is the path to a bundled JAR, Python, or R file + of the application. + type: string + mainClass: + description: |- + MainClass is the fully-qualified main class of the Spark application. + This only applies to Java/Scala Spark applications. + type: string + memoryOverheadFactor: + description: |- + This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. + For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will + be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set. + type: string + mode: + description: Mode is the deployment mode of the Spark application. + enum: + - cluster + - client + type: string + monitoring: + description: Monitoring configures how monitoring is handled. + properties: + exposeDriverMetrics: + description: ExposeDriverMetrics specifies whether to expose metrics + on the driver. + type: boolean + exposeExecutorMetrics: + description: ExposeExecutorMetrics specifies whether to expose + metrics on the executors. + type: boolean + metricsProperties: + description: |- + MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system. + If not specified, the content in spark-docker/conf/metrics.properties will be used. + type: string + metricsPropertiesFile: + description: |- + MetricsPropertiesFile is the container local path of file metrics.properties for configuring + the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used. + type: string + prometheus: + description: Prometheus is for configuring the Prometheus JMX + exporter. + properties: + configFile: + description: |- + ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. + ConfigFile takes precedence over Configuration, which is shown below. + type: string + configuration: + description: |- + Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. + If not specified, the content in spark-docker/conf/prometheus.yaml will be used. + Configuration has no effect if ConfigFile is set. + type: string + jmxExporterJar: + description: JmxExporterJar is the path to the Prometheus + JMX exporter jar in the container.
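+ # Illustrative sketch, not part of the generated schema: a SparkApplication might enable + # the Prometheus JMX exporter through the monitoring fields above roughly as follows; + # the jar path shown here is an assumption and depends on the image in use. + # monitoring: + # exposeDriverMetrics: true + # exposeExecutorMetrics: true + # prometheus: + # jmxExporterJar: /opt/spark/jars/jmx_prometheus_javaagent.jar + # port: 8090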
+ type: string + port: + description: |- + Port is the port of the HTTP server run by the Prometheus JMX exporter. + If not specified, 8090 will be used as the default. + format: int32 + maximum: 49151 + minimum: 1024 + type: integer + portName: + description: |- + PortName is the port name of prometheus JMX exporter port. + If not specified, jmx-exporter will be used as the default. + type: string + required: + - jmxExporterJar + type: object + required: + - exposeDriverMetrics + - exposeExecutorMetrics + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). + This field will be deprecated in future versions (at SparkApplicationSpec level). + type: object + proxyUser: + description: |- + ProxyUser specifies the user to impersonate when submitting the application. + It maps to the command-line flag "--proxy-user" in spark-submit. + type: string + pythonVersion: + description: |- + This sets the major Python version of the docker + image used to run the driver and executor containers. Can either be 2 or 3, default 2. + enum: + - "2" + - "3" + type: string + restartPolicy: + description: RestartPolicy defines the policy on if and in which conditions + the controller should restart an application. + properties: + onFailureRetries: + description: OnFailureRetries the number of times to retry running + an application before giving up. + format: int32 + minimum: 0 + type: integer + onFailureRetryInterval: + description: OnFailureRetryInterval is the interval in seconds + between retries on failed runs. + format: int64 + minimum: 1 + type: integer + onSubmissionFailureRetries: + description: |- + OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. + This is best effort and actual retry attempts can be >= the value specified due to caching. + These are required if RestartPolicy is OnFailure. + format: int32 + minimum: 0 + type: integer + onSubmissionFailureRetryInterval: + description: OnSubmissionFailureRetryInterval is the interval + in seconds between retries on failed submissions. + format: int64 + minimum: 1 + type: integer + type: + description: Type specifies the RestartPolicyType. + enum: + - Never + - Always + - OnFailure + type: string + type: object + retryInterval: + description: RetryInterval is the unit of intervals in seconds between + submission retries. + format: int64 + type: integer + sparkConf: + additionalProperties: + type: string + description: |- + SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in + spark-submit. + type: object + sparkConfigMap: + description: |- + SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties. + The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + sparkUIOptions: + description: SparkUIOptions allows configuring the Service and the + Ingress to expose the sparkUI + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key,value pairs of + annotations that might be added to the ingress object. i.e. 
+ specify nginx as ingress.class + type: object + ingressTLS: + description: IngressTLS is useful if we need to declare SSL certificates + to the ingress object + items: + description: IngressTLS describes the transport layer security + associated with an ingress. + properties: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs of + annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of labels + that might be added to the service object. + type: object + servicePort: + description: |- + ServicePort allows configuring the port at service level that might be different from the targetPort. + TargetPort should be the same as the one defined in spark.ui.port + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP. + Defaults to spark-driver-ui-port. + type: string + serviceType: + description: ServiceType allows configuring the type of the service. + Defaults to ClusterIP. + type: string + type: object + sparkVersion: + description: SparkVersion is the version of Spark the application + uses. + type: string + timeToLiveSeconds: + description: |- + TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication + after its termination. + The SparkApplication object will be garbage collected if the current time is more than the + TimeToLiveSeconds since its termination. + format: int64 + type: integer + type: + description: Type tells the type of the Spark application. + enum: + - Java + - Python + - Scala + - R + type: string + volumes: + description: Volumes is the list of Kubernetes volumes that can be + mounted by the driver and/or executors. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
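+ # Illustrative sketch, not part of the generated schema: the configMap volume source above + # is typically paired with a volumeMount; the volume and ConfigMap names here are hypothetical. + # volumes: + # - name: spark-extra-config + # configMap: + # name: my-spark-config + # driver: + # volumeMounts: + # - name: spark-extra-config + # mountPath: /etc/spark/extra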
+ items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?
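+ # Illustrative sketch, not part of the generated schema: the generic ephemeral volume described + # above might be declared as follows; the storage class name and size are assumptions. + # volumes: + # - name: scratch + # ephemeral: + # volumeClaimTemplate: + # spec: + # accessModes: ["ReadWriteOnce"] + # storageClassName: standard + # resources: + # requests: + # storage: 10Gi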
+ type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + <target portal>:<volume name> will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?
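+ # Illustrative sketch, not part of the generated schema: a hostPath volume as described above; + # the path and type values are assumptions. + # volumes: + # - name: spark-local-dir-1 + # hostPath: + # path: /tmp/spark-local-dir + # type: DirectoryOrCreate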
+ type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelet's host machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelet's host machine + properties: + fsType: + description: |- + fsType represents the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. 
The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. 
+ properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - driver + - executor + - sparkVersion + - type + type: object + status: + description: SparkApplicationStatus defines the observed state of SparkApplication + properties: + applicationState: + description: AppState tells the overall application state. + properties: + errorMessage: + type: string + state: + description: ApplicationStateType represents the type of the current + state of an application. + type: string + required: + - state + type: object + driverInfo: + description: DriverInfo has information about the driver. + properties: + podName: + type: string + webUIAddress: + description: UI Details for the UI created via ClusterIP service + accessible from within the cluster. + type: string + webUIIngressAddress: + type: string + webUIIngressName: + description: Ingress Details if an ingress for the UI was created. + type: string + webUIPort: + format: int32 + type: integer + webUIServiceName: + type: string + type: object + executionAttempts: + description: |- + ExecutionAttempts is the total number of attempts to run a submitted application to completion. + Incremented upon each attempted run of the application and reset upon invalidation. + format: int32 + type: integer + executorState: + additionalProperties: + description: ExecutorState tells the current state of an executor. + type: string + description: ExecutorState records the state of executors by executor + Pod names. + type: object + lastSubmissionAttemptTime: + description: LastSubmissionAttemptTime is the time for the last application + submission attempt. + format: date-time + nullable: true + type: string + sparkApplicationId: + description: SparkApplicationID is set by the spark-distribution(via + spark.app.id config) on the driver and executor pods + type: string + submissionAttempts: + description: |- + SubmissionAttempts is the total number of attempts to submit an application to run. + Incremented upon each attempted submission of the application and reset upon invalidation and rerun. + format: int32 + type: integer + submissionID: + description: SubmissionID is a unique ID of the current submission + of the application. + type: string + terminationTime: + description: CompletionTime is the time when the application runs + to completion if it does. + format: date-time + nullable: true + type: string + required: + - driverInfo + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 000000000..3d5605b3e --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,26 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml +- bases/sparkoperator.k8s.io_sparkapplications.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
+# patches here are for enabling the conversion webhook for each CRD +- path: patches/webhook_in_sparkapplications.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- path: patches/cainjection_in_scheduledsparkapplications.yaml +#- path: patches/cainjection_in_sparkapplications.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. + +configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 000000000..ec5c150a9 --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/cainjection_in_sparkapplications.yaml b/config/crd/patches/cainjection_in_sparkapplications.yaml new file mode 100644 index 000000000..80a2b6df8 --- /dev/null +++ b/config/crd/patches/cainjection_in_sparkapplications.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: sparkapplications.sparkoperator.k8s.io diff --git a/config/crd/patches/webhook_in_sparkapplications.yaml b/config/crd/patches/webhook_in_sparkapplications.yaml new file mode 100644 index 000000000..35f652608 --- /dev/null +++ b/config/crd/patches/webhook_in_sparkapplications.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sparkapplications.sparkoperator.k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 000000000..738de350b --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml new file mode 100644 index 000000000..58549af99 --- /dev/null +++ b/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,25 @@ +# This patch adds annotations to the
admission webhook config and +# CERTIFICATE_NAMESPACE and CERTIFICATE_NAME will be substituted by kustomize +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: validatingwebhookconfiguration + app.kubernetes.io/instance: validating-webhook-configuration + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: spark-operator + app.kubernetes.io/part-of: spark-operator + app.kubernetes.io/managed-by: kustomize + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 000000000..4a9d1d526 --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,130 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: spark-operator-controller +rules: +- resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update +- resources: + - events + verbs: + - create + - patch + - update +- resources: + - nodes + verbs: + - get +- resources: + - pods + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- resources: + - resourcequotas + verbs: + - get + - list + - watch +- resources: + - services + verbs: + - create + - delete + - get +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications/finalizers + verbs: + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications/status + verbs: + - get + - patch + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/finalizers + verbs: + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/status + verbs: + - get + - patch + - update diff --git a/config/rbac/scheduledsparkapplication_editor_role.yaml b/config/rbac/scheduledsparkapplication_editor_role.yaml new file mode 100644 index 000000000..5bae90730 --- /dev/null +++ b/config/rbac/scheduledsparkapplication_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit scheduledsparkapplications. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: scheduledsparkapplication-editor-role +rules: +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications/status + verbs: + - get diff --git a/config/rbac/scheduledsparkapplication_viewer_role.yaml b/config/rbac/scheduledsparkapplication_viewer_role.yaml new file mode 100644 index 000000000..29ee54184 --- /dev/null +++ b/config/rbac/scheduledsparkapplication_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view scheduledsparkapplications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: scheduledsparkapplication-viewer-role +rules: +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications + verbs: + - get + - list + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications/status + verbs: + - get diff --git a/config/rbac/sparkapplication_editor_role.yaml b/config/rbac/sparkapplication_editor_role.yaml new file mode 100644 index 000000000..575c2be6e --- /dev/null +++ b/config/rbac/sparkapplication_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit sparkapplications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: sparkapplication-editor-role +rules: +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/status + verbs: + - get diff --git a/config/rbac/sparkapplication_viewer_role.yaml b/config/rbac/sparkapplication_viewer_role.yaml new file mode 100644 index 000000000..4738d708d --- /dev/null +++ b/config/rbac/sparkapplication_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view sparkapplications. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: sparkapplication-viewer-role +rules: +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + verbs: + - get + - list + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/status + verbs: + - get diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml new file mode 100644 index 000000000..e139c240b --- /dev/null +++ b/config/samples/kustomization.yaml @@ -0,0 +1,7 @@ +## Append samples of your project ## +resources: +- v1beta1_sparkapplication.yaml +- v1beta1_scheduledsparkapplication.yaml +- v1beta2_sparkapplication.yaml +- v1beta2_scheduledsparkapplication.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/v1beta1_scheduledsparkapplication.yaml b/config/samples/v1beta1_scheduledsparkapplication.yaml new file mode 100644 index 000000000..bc628f489 --- /dev/null +++ b/config/samples/v1beta1_scheduledsparkapplication.yaml @@ -0,0 +1,9 @@ +apiVersion: sparkoperator.k8s.io/v1beta1 +kind: ScheduledSparkApplication +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: scheduledsparkapplication-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/v1beta1_sparkapplication.yaml b/config/samples/v1beta1_sparkapplication.yaml new file mode 100644 index 000000000..d6c3e25b4 --- /dev/null +++ b/config/samples/v1beta1_sparkapplication.yaml @@ -0,0 +1,23 @@ +apiVersion: sparkoperator.k8s.io/v1beta1 +kind: SparkApplication +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: sparkapplication-sample +spec: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + driver: + labels: + version: 3.5.0 + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 1 diff --git a/config/samples/v1beta2_scheduledsparkapplication.yaml b/config/samples/v1beta2_scheduledsparkapplication.yaml new file mode 100644 index 000000000..294430f57 --- /dev/null +++ b/config/samples/v1beta2_scheduledsparkapplication.yaml @@ -0,0 +1,34 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: ScheduledSparkApplication +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: scheduledsparkapplication-sample +spec: + schedule: "@every 3m" + concurrencyPolicy: Allow + template: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + restartPolicy: + type: Never + driver: + labels: + version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m diff --git a/config/samples/v1beta2_sparkapplication.yaml b/config/samples/v1beta2_sparkapplication.yaml new file mode 100644 index 000000000..70f4152b9 --- /dev/null +++ b/config/samples/v1beta2_sparkapplication.yaml @@ -0,0 +1,23 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: 
+ labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: sparkapplication-sample +spec: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + driver: + labels: + version: 3.5.0 + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 1 diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 000000000..9cf26134e --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 000000000..206316e54 --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,22 @@ +# the following config is for teaching kustomize where to look at when substituting nameReference. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 000000000..d98b6ec08 --- /dev/null +++ b/config/webhook/manifests.yaml @@ -0,0 +1,119 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate--v1-pod + failurePolicy: Fail + matchPolicy: Exact + name: mutate-pod.sparkoperator.k8s.io + reinvocationPolicy: Never + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-sparkoperator-k8s-io-v1beta2-sparkapplication + failurePolicy: Fail + matchPolicy: Exact + name: mutate-sparkapplication.sparkoperator.k8s.io + reinvocationPolicy: Never + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - sparkapplications + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-sparkoperator-k8s-io-v1beta2-sparkapplication + failurePolicy: Fail + matchPolicy: Exact + name: mutate-scheduledsparkapplication.sparkoperator.k8s.io + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - scheduledsparkapplications + sideEffects: None +- admissionReviewVersions: + 
- v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication + failurePolicy: Fail + matchPolicy: Exact + name: validate-scheduledsparkapplication.sparkoperator.k8s.io + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - scheduledsparkapplications + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-sparkoperator-k8s-io-v1beta2-sparkapplication + failurePolicy: Fail + matchPolicy: Exact + name: validate-sparkapplication.sparkoperator.k8s.io + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - sparkapplications + sideEffects: None diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 000000000..f171f47f7 --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/docs/api-docs.md b/docs/api-docs.md index 245551527..6117b6a23 100644 --- a/docs/api-docs.md +++ b/docs/api-docs.md @@ -9,14 +9,14 @@

Package v1beta2 is the v1beta2 version of the API.

Resource Types: - -

ScheduledSparkApplication +
    +

    ApplicationState

    +

    +(Appears on:SparkApplicationStatus) +

    +

    ApplicationState tells the current state of the application and an error message in case of failures.

    @@ -28,670 +28,609 @@ Resource Types: - - - - - - - - -
    -apiVersion
    -string
    - -sparkoperator.k8s.io/v1beta2 - -
    -kind
    -string -
    ScheduledSparkApplication
    -metadata
    +state
    - -Kubernetes meta/v1.ObjectMeta + +ApplicationStateType
    -Refer to the Kubernetes API documentation for the fields of the -metadata field.
    -spec
    - - -ScheduledSparkApplicationSpec - - -
    -
    -
    - - - - - - - + +
    -schedule
    +errorMessage
    string
    -

    Schedule is a cron schedule on which the application should run.

    -
    -template
    - - -SparkApplicationSpec - - -
    -

    Template is a template from which SparkApplication instances can be created.

    +

    ApplicationStateType +(string alias)

    +

    +(Appears on:ApplicationState) +

    +
    +

    ApplicationStateType represents the type of the current state of an application.

    +
    + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + +
    -suspend
    - -bool - -
    -(Optional) -

    Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. -Defaults to false.

    -
    ValueDescription

    "COMPLETED"

    "FAILED"

    "SUBMISSION_FAILED"

    "FAILING"

    "INVALIDATING"

    ""

    "PENDING_RERUN"

    "RUNNING"

    "SUBMITTED"

    "SUCCEEDING"

    "UNKNOWN"

    +

    BatchSchedulerConfiguration +

    +

    +(Appears on:SparkApplicationSpec) +

    +
    +

BatchSchedulerConfiguration is used to configure how Spark applications are batch scheduled

    +
    + + - - + + + + - -
    -concurrencyPolicy
    - - -ConcurrencyPolicy - - -
    -

    ConcurrencyPolicy is the policy governing concurrent SparkApplication runs.

    -
    FieldDescription
    -successfulRunHistoryLimit
    +queue
    -int32 +string
    (Optional) -

    SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. -Defaults to 1.

    +

    Queue stands for the resource queue which the application belongs to, it’s being used in Volcano batch scheduler.

    -failedRunHistoryLimit
    +priorityClassName
    -int32 +string
    (Optional) -

    FailedRunHistoryLimit is the number of past failed runs of the application to keep. -Defaults to 1.

    -
    +

PriorityClassName stands for the name of the k8s PriorityClass resource; it is used by the Volcano batch scheduler.

    -status
    +resources
    - -ScheduledSparkApplicationStatus + +Kubernetes core/v1.ResourceList
    +(Optional) +

    Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. +If specified, volcano scheduler will consider it as the resources requested.

    -

    SparkApplication -

    +

    ConcurrencyPolicy +(string alias)

    +

    +(Appears on:ScheduledSparkApplicationSpec) +

    -

    SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager.

    - + - - - - - - - - - - - + - + - - - + - +
    FieldValue Description
    -apiVersion
    -string
    - -sparkoperator.k8s.io/v1beta2 - -
    -kind
    -string -
    SparkApplication
    -metadata
    - - -Kubernetes meta/v1.ObjectMeta - - +

    "Allow"

    ConcurrencyAllow allows SparkApplications to run concurrently.

    -Refer to the Kubernetes API documentation for the fields of the -metadata field. +

    "Forbid"

    ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous +one hasn’t finished yet.

    -spec
    - - -SparkApplicationSpec - - +

    "Replace"

    ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one.
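As a hedged illustration of the three policies, a ScheduledSparkApplication could pin one of them like this; the schedule and template values follow the samples added earlier in this patch, and the name is made up:

```yaml
apiVersion: sparkoperator.k8s.io/v1beta2
kind: ScheduledSparkApplication
metadata:
  name: spark-pi-scheduled     # hypothetical name
spec:
  schedule: "@every 3m"
  concurrencyPolicy: Forbid    # skip the next run while the previous one is still going
  successfulRunHistoryLimit: 1 # keep only the most recent successful run
  failedRunHistoryLimit: 1
  template:                    # a SparkApplicationSpec, as documented in this reference
    type: Scala
    mode: cluster
    image: spark:3.5.0
    mainClass: org.apache.spark.examples.SparkPi
    mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar
    sparkVersion: 3.5.0
    restartPolicy:
      type: Never
    driver:
      serviceAccount: spark-operator-spark
    executor:
      instances: 1
```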

    -
    -
    +
    +

    Dependencies +

    +

    +(Appears on:SparkApplicationSpec) +

    +
    +

    Dependencies specifies all possible types of dependencies of a Spark application.

    +
    + - - - - - - - - - - - - - - - - - - + + + + + +
    -type
    - - -SparkApplicationType - - -
    -

    Type tells the type of the Spark application.

    -
    -sparkVersion
    - -string - -
    -

    SparkVersion is the version of Spark the application uses.

    -
    -mode
    - - -DeployMode - - -
    -

    Mode is the deployment mode of the Spark application.

    -
    -proxyUser
    - -string - -
    -(Optional) -

    ProxyUser specifies the user to impersonate when submitting the application. -It maps to the command-line flag “–proxy-user” in spark-submit.

    -
    -image
    - -string - -
    -(Optional) -

    Image is the container image for the driver, executor, and init-container. Any custom container images for the -driver, executor, or init-container takes precedence over this.

    -
    FieldDescription
    -imagePullPolicy
    +jars
    -string +[]string
    (Optional) -

    ImagePullPolicy is the image pull policy for the driver, executor, and init-container.

    +

    Jars is a list of JAR files the Spark application depends on.

    -imagePullSecrets
    +files
    []string
    (Optional) -

    ImagePullSecrets is the list of image-pull secrets.

    +

    Files is a list of files the Spark application depends on.

    -mainClass
    +pyFiles
    -string +[]string
    (Optional) -

    MainClass is the fully-qualified main class of the Spark application. -This only applies to Java/Scala Spark applications.

    +

    PyFiles is a list of Python files the Spark application depends on.

    -mainApplicationFile
    +packages
    -string +[]string
    (Optional) -

    MainFile is the path to a bundled JAR, Python, or R file of the application.

    +

    Packages is a list of maven coordinates of jars to include on the driver and executor +classpaths. This will search the local maven repo, then maven central and any additional +remote repositories given by the “repositories” option. +Each package should be of the form “groupId:artifactId:version”.

    -arguments
    +excludePackages
    []string
    (Optional) -

    Arguments is a list of arguments to be passed to the application.

    +

    ExcludePackages is a list of “groupId:artifactId”, to exclude while resolving the +dependencies provided in Packages to avoid dependency conflicts.

    -sparkConf
    +repositories
    -map[string]string +[]string
    (Optional) -

    SparkConf carries user-specified Spark configuration properties as they would use the “–conf” option in -spark-submit.

    +

    Repositories is a list of additional remote repositories to search for the maven coordinate +given with the “packages” option.
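A short sketch of a `deps` fragment combining these fields inside a SparkApplication spec; every coordinate, path, and URL below is a placeholder:

```yaml
spec:
  deps:
    jars:
      - local:///opt/spark/extra/my-udfs.jar     # placeholder path
    pyFiles:
      - local:///opt/spark/extra/helpers.py      # placeholder path
    packages:
      - org.apache.spark:spark-avro_2.12:3.5.0   # groupId:artifactId:version
    excludePackages:
      - com.example:conflicting-lib              # placeholder groupId:artifactId
    repositories:
      - https://repo.example.com/maven2          # placeholder repository
```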

    +

    DeployMode +(string alias)

    +

    +(Appears on:SparkApplicationSpec) +

    +
    +

    DeployMode describes the type of deployment of a Spark application.

    +
    + + + + + + + + + + + + + + +
    ValueDescription

    "client"

    "cluster"

    "in-cluster-client"

    +

    DriverInfo +

    +

    +(Appears on:SparkApplicationStatus) +

    +
    +

    DriverInfo captures information about the driver.

    +
    + + + + + + + + + +
    FieldDescription
    -hadoopConf
    +webUIServiceName
    -map[string]string +string
    -(Optional) -

HadoopConf carries user-specified Hadoop configuration properties as they would use the “–conf” option -in spark-submit. The SparkApplication controller automatically adds prefix “spark.hadoop.” to Hadoop -configuration properties.

    -sparkConfigMap
    +webUIAddress
    string
    -(Optional) -

    SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties. -The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to.

    +

    UI Details for the UI created via ClusterIP service accessible from within the cluster.

    -hadoopConfigMap
    +webUIPort
    -string +int32
    -(Optional) -

    HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. -The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to.

    -volumes
    +webUIIngressName
    - -[]Kubernetes core/v1.Volume - +string
    -(Optional) -

    Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors.

    +

    Ingress Details if an ingress for the UI was created.

    -driver
    +webUIIngressAddress
    - -DriverSpec - +string
    -

    Driver is the driver specification.

    -executor
    +podName
    - -ExecutorSpec - +string
    -

    Executor is the executor specification.

    +

    DriverIngressConfiguration +

    +

    +(Appears on:SparkApplicationSpec) +

    +
    +

    DriverIngressConfiguration is for driver ingress specific configuration parameters.

    +
    + + + + + + + + + +
    FieldDescription
    -deps
    +servicePort
    - -Dependencies - +int32
    -(Optional) -

    Deps captures all possible types of dependencies of a Spark application.

    +

    ServicePort allows configuring the port at service level that might be different from the targetPort.

    -restartPolicy
    +servicePortName
    - -RestartPolicy - +string
    -

    RestartPolicy defines the policy on if and in which conditions the controller should restart an application.

    +

ServicePortName allows configuring the name of the service port. +This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP.

    -nodeSelector
    +serviceType
    -map[string]string + +Kubernetes core/v1.ServiceType +
    (Optional) -

    NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. -This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). -This field will be deprecated in future versions (at SparkApplicationSpec level).

    +

    ServiceType allows configuring the type of the service. Defaults to ClusterIP.

    -failureRetries
    +serviceAnnotations
    -int32 +map[string]string
    (Optional) -

    FailureRetries is the number of times to retry a failed application before giving up. -This is best effort and actual retry attempts can be >= the value specified.

    +

    ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object.

    -retryInterval
    +serviceLabels
    -int64 +map[string]string
    (Optional) -

    RetryInterval is the unit of intervals in seconds between submission retries.

    +

    ServiceLabels is a map of key,value pairs of labels that might be added to the service object.

    -pythonVersion
    +ingressURLFormat
    string
    -(Optional) -

    This sets the major Python version of the docker -image used to run the driver and executor containers. Can either be 2 or 3, default 2.

    +

    IngressURLFormat is the URL for the ingress.

    -memoryOverheadFactor
    +ingressAnnotations
    -string +map[string]string
    (Optional) -

    This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. -For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will -be overridden by Spec.Driver.MemoryOverhead and Spec.Executor.MemoryOverhead if they are set.

    +

IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object, e.g. to specify nginx as the ingress.class

    -monitoring
    +ingressTLS
    - -MonitoringSpec + +[]Kubernetes networking/v1.IngressTLS
    (Optional) -

    Monitoring configures how monitoring is handled.

    +

TlsHosts is useful if we need to declare SSL certificates for the ingress object
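For orientation, a hypothetical `driverIngressOptions` entry exercising the fields above might look like this fragment of a SparkApplication spec; the host names, annotation values, and secret name are assumptions:

```yaml
spec:
  driverIngressOptions:
    - servicePort: 4040
      servicePortName: driver-ui          # explicit port name, e.g. for Istio/Envoy
      serviceType: ClusterIP              # the documented default
      serviceAnnotations:
        example.com/owner: data-platform  # placeholder annotation
      serviceLabels:
        example.com/tier: ui              # placeholder label
      ingressURLFormat: spark-ui.example.com  # placeholder ingress URL
      ingressAnnotations:
        kubernetes.io/ingress.class: nginx
      ingressTLS:
        - hosts:
            - spark-ui.example.com        # placeholder host
          secretName: spark-ui-tls        # assumes this TLS secret exists
```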

    +

    DriverSpec +

    +

    +(Appears on:SparkApplicationSpec) +

    +
    +

    DriverSpec is specification of the driver.

    +
    + + + + + + + + - -
    FieldDescription
    -batchScheduler
    +SparkPodSpec
    -string + +SparkPodSpec +
    -(Optional) -

    BatchScheduler configures which batch scheduler will be used for scheduling

    +

    +(Members of SparkPodSpec are embedded into this type.) +

    -timeToLiveSeconds
    +podName
    -int64 +string
    (Optional) -

    TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication -after its termination. -The SparkApplication object will be garbage collected if the current time is more than the -TimeToLiveSeconds since its termination.

    +

    PodName is the name of the driver pod that the user creates. This is used for the +in-cluster client mode in which the user creates a client pod where the driver of +the user application runs. It’s an error to set this field if Mode is not +in-cluster-client.

    -batchSchedulerOptions
    +coreRequest
    - -BatchSchedulerConfiguration - +string
    (Optional) -

    BatchSchedulerOptions provides fine-grained control on how to batch scheduling.

    +

    CoreRequest is the physical CPU core request for the driver. +Maps to spark.kubernetes.driver.request.cores that is available since Spark 3.0.

    -sparkUIOptions
    +javaOptions
    - -SparkUIConfiguration - +string
    (Optional) -

    SparkUIOptions allows configuring the Service and the Ingress to expose the sparkUI

    +

    JavaOptions is a string of extra JVM options to pass to the driver. For instance, +GC settings or other logging.

    -driverIngressOptions
    +lifecycle
    - -[]DriverIngressConfiguration + +Kubernetes core/v1.Lifecycle
    (Optional) -

    DriverIngressOptions allows configuring the Service and the Ingress to expose ports inside Spark Driver

    +

    Lifecycle for running preStop or postStart commands

    -dynamicAllocation
    +kubernetesMaster
    - -DynamicAllocation - +string
    (Optional) -

    DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes -scheduler backend since Spark 3.0.

    -
    +

    KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and +other Kubernetes resources. Default to https://kubernetes.default.svc.

    -status
    - - -SparkApplicationStatus - +serviceAnnotations
    + +map[string]string +(Optional) +

    ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by +executors to connect to the driver.

    - - -

    ApplicationState -

    -

    -(Appears on:SparkApplicationStatus) -

    -
    -

    ApplicationState tells the current state of the application and an error message in case of failures.

    -
    - - - - - - - -
    FieldDescription
    -state
    +serviceLabels
    - -ApplicationStateType - +map[string]string
    +(Optional) +

    ServiceLabels defines the labels to be added to the Kubernetes headless service used by +executors to connect to the driver.

    -errorMessage
    +ports
    -string + +[]Port +
    +(Optional) +

    Ports settings for the pods, following the Kubernetes specifications.
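To show how these driver-only fields sit next to the embedded SparkPodSpec, here is a hedged fragment; the option values, hook command, and port are illustrative:

```yaml
spec:
  driver:
    cores: 1
    coreRequest: 500m                    # physical CPU request, Spark 3.0+
    memory: 512m
    javaOptions: "-XX:+UseG1GC"          # extra JVM options for the driver
    serviceAccount: spark-operator-spark
    lifecycle:
      preStop:
        exec:
          command: ["/bin/sh", "-c", "sleep 5"]  # illustrative preStop hook
    ports:
      - name: custom-port                # extra pod port, per the Port type above
        protocol: TCP
        containerPort: 12345
```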

    -

    ApplicationStateType +

    DriverState (string alias)

    -

    -(Appears on:ApplicationState) -

    -

    ApplicationStateType represents the type of the current state of an application.

    +

    DriverState tells the current state of a spark driver.

    @@ -704,33 +643,21 @@ string - - - - - - - - - + - - - -

    "FAILED"

    "SUBMISSION_FAILED"

    "FAILING"

    "INVALIDATING"

    ""

    "PENDING_RERUN"

    "PENDING"

    "RUNNING"

    "SUBMITTED"

    "SUCCEEDING"

    "UNKNOWN"

    -

    BatchSchedulerConfiguration +

    DynamicAllocation

    (Appears on:SparkApplicationSpec)

    -

    BatchSchedulerConfiguration used to configure how to batch scheduling Spark Application

    +

    DynamicAllocation contains configuration options for dynamic allocation.

    @@ -742,78 +669,74 @@ string - -
    -queue
    +enabled
    -string +bool
    -(Optional) -

    Queue stands for the resource queue which the application belongs to, it’s being used in Volcano batch scheduler.

    +

    Enabled controls whether dynamic allocation is enabled or not.

    -priorityClassName
    +initialExecutors
    -string +int32
    (Optional) -

    PriorityClassName stands for the name of k8s PriorityClass resource, it’s being used in Volcano batch scheduler.

    +

    InitialExecutors is the initial number of executors to request. If .spec.executor.instances +is also set, the initial number of executors is set to the bigger of that and this option.

    -resources
    +minExecutors
    - -Kubernetes core/v1.ResourceList - +int32
    (Optional) -

    Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. -If specified, volcano scheduler will consider it as the resources requested.

    +

    MinExecutors is the lower bound for the number of executors if dynamic allocation is enabled.

    -

    ConcurrencyPolicy -(string alias)

    -

    -(Appears on:ScheduledSparkApplicationSpec) -

    -
    -
    - - - - - - - - - - - - + + - + + +
    ValueDescription

    "Allow"

    ConcurrencyAllow allows SparkApplications to run concurrently.

    +
    +maxExecutors
    + +int32 +

    "Forbid"

    ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous -one hasn’t finished yet.

    +
    +(Optional) +

    MaxExecutors is the upper bound for the number of executors if dynamic allocation is enabled.

    "Replace"

    ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one.

    +
    +shuffleTrackingTimeout
    + +int64 +
    +(Optional) +

    ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding +shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled).
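A minimal hedged sketch of a `dynamicAllocation` block using the fields above; the numbers are arbitrary:

```yaml
spec:
  dynamicAllocation:
    enabled: true
    initialExecutors: 2           # the bigger of this and spec.executor.instances wins
    minExecutors: 1
    maxExecutors: 10
    shuffleTrackingTimeout: 60000 # ms to keep executors that still hold shuffle data
```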

    +
    -

    Dependencies +

    ExecutorSpec

    (Appears on:SparkApplicationSpec)

    -

    Dependencies specifies all possible types of dependencies of a Spark application.

    +

    ExecutorSpec is specification of the executor.

    @@ -825,90 +748,107 @@ one hasn’t finished yet.

    + + + +
    -jars
    +SparkPodSpec
    -[]string + +SparkPodSpec + + +
    +

    +(Members of SparkPodSpec are embedded into this type.) +

    +
    +instances
    + +int32
    (Optional) -

    Jars is a list of JAR files the Spark application depends on.

    +

    Instances is the number of executor instances.

    -files
    +coreRequest
    -[]string +string
    (Optional) -

    Files is a list of files the Spark application depends on.

    +

    CoreRequest is the physical CPU core request for the executors. +Maps to spark.kubernetes.executor.request.cores that is available since Spark 2.4.

    -pyFiles
    +javaOptions
    -[]string +string
    (Optional) -

    PyFiles is a list of Python files the Spark application depends on.

    +

    JavaOptions is a string of extra JVM options to pass to the executors. For instance, +GC settings or other logging.

    -packages
    +lifecycle
    -[]string + +Kubernetes core/v1.Lifecycle +
    (Optional) -

    Packages is a list of maven coordinates of jars to include on the driver and executor -classpaths. This will search the local maven repo, then maven central and any additional -remote repositories given by the “repositories” option. -Each package should be of the form “groupId:artifactId:version”.

    +

    Lifecycle for running preStop or postStart commands

    -excludePackages
    +deleteOnTermination
    -[]string +bool
    (Optional) -

    ExcludePackages is a list of “groupId:artifactId”, to exclude while resolving the -dependencies provided in Packages to avoid dependency conflicts.

    +

DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination. +Maps to spark.kubernetes.executor.deleteOnTermination that is available since Spark 3.0.

    -repositories
    +ports
    -[]string + +[]Port +
    (Optional) -

    Repositories is a list of additional remote repositories to search for the maven coordinate -given with the “packages” option.

    +

    Ports settings for the pods, following the Kubernetes specifications.

    -

    DeployMode +

    ExecutorState (string alias)

    -(Appears on:SparkApplicationSpec) +(Appears on:SparkApplicationStatus)

    -

    DeployMode describes the type of deployment of a Spark application.

    +

    ExecutorState tells the current state of an executor.

    @@ -917,21 +857,24 @@ given with the “packages” option.

    - + - + - + + + + +
    Description

    "client"

    "COMPLETED"

    "cluster"

    "FAILED"

    "in-cluster-client"

    "PENDING"

    "RUNNING"

    "UNKNOWN"

    -

    DriverInfo +

    GPUSpec

    -(Appears on:SparkApplicationStatus) +(Appears on:SparkPodSpec)

    -

    DriverInfo captures information about the driver.

    @@ -943,75 +886,115 @@ given with the “packages” option.

    + + +
    -webUIServiceName
    +name
    string
    +

    Name is GPU resource name, such as: nvidia.com/gpu or amd.com/gpu

    -webUIPort
    +quantity
    -int32 +int64
    -

    UI Details for the UI created via ClusterIP service accessible from within the cluster.

    +

    Quantity is the number of GPUs to request for driver or executor.
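Hypothetically, assuming the corresponding SparkPodSpec field is named `gpu` (an assumption, since that field is documented elsewhere), a GPU request could be expressed like this:

```yaml
spec:
  executor:
    instances: 2
    gpu:                     # assumed field name on SparkPodSpec
      name: nvidia.com/gpu   # GPU resource name; amd.com/gpu also documented
      quantity: 1            # GPUs to request per executor pod
```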

    +
    +

    MonitoringSpec +

    +

    +(Appears on:SparkApplicationSpec) +

    +
    +

    MonitoringSpec defines the monitoring specification.

    +
    + + + + + + + + + + +
    FieldDescription
    +exposeDriverMetrics
    + +bool + +
    +

    ExposeDriverMetrics specifies whether to expose metrics on the driver.

    -webUIAddress
    +exposeExecutorMetrics
    -string +bool
    +

    ExposeExecutorMetrics specifies whether to expose metrics on the executors.

    -webUIIngressName
    +metricsProperties
    string
    -

    Ingress Details if an ingress for the UI was created.

    +(Optional) +

    MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system. +If not specified, the content in spark-docker/conf/metrics.properties will be used.

    -webUIIngressAddress
    +metricsPropertiesFile
    string
    +(Optional) +

    MetricsPropertiesFile is the container local path of file metrics.properties for configuring +the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used.

    -podName
    +prometheus
    -string + +PrometheusSpec +
    +(Optional) +

    Prometheus is for configuring the Prometheus JMX exporter.

    -

    DriverIngressConfiguration +

    NameKey

    -(Appears on:SparkApplicationSpec) +(Appears on:SparkPodSpec)

    -

    DriverIngressConfiguration is for driver ingress specific configuration parameters.

    +

    NameKey represents the name and key of a SecretKeyRef.

    @@ -1023,111 +1006,120 @@ string + +
    -servicePort
    +name
    -int32 +string
    -

    ServicePort allows configuring the port at service level that might be different from the targetPort.

    -servicePortName
    +key
    string
    -

ServicePortName allows configuring the name of the service port. -This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP.

    +

    NamePath +

    +

    +(Appears on:SparkPodSpec) +

    +
    +

    NamePath is a pair of a name and a path to which the named objects should be mounted to.

    +
    + + - - + + + + + +
    -serviceType
    - - -Kubernetes core/v1.ServiceType - - -
    -(Optional) -

    ServiceType allows configuring the type of the service. Defaults to ClusterIP.

    -
    FieldDescription
    -serviceAnnotations
    +name
    -map[string]string +string
    -(Optional) -

    ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object.

    -serviceLabels
    +path
    -map[string]string +string
    -(Optional) -

ServiceLabels is a map of key,value pairs of labels that might be added to the service object.

    +

    Port +

    +

    +(Appears on:DriverSpec, ExecutorSpec) +

    +
    +

    Port represents the port definition in the pods objects.

    +
    + + + + + + + +
    FieldDescription
    -ingressURLFormat
    +name
    string
    -

    IngressURLFormat is the URL for the ingress.

    -ingressAnnotations
    +protocol
    -map[string]string +string
    -(Optional) -

IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object, e.g. to specify nginx as the ingress.class

    -ingressTLS
    +containerPort
    - -[]Kubernetes networking/v1.IngressTLS - +int32
    -(Optional) -

TlsHosts is useful if we need to declare SSL certificates for the ingress object -

DriverSpec

(Appears on: SparkApplicationSpec)

DriverSpec is the specification of the driver.

SparkPodSpec
(Members of SparkPodSpec are embedded into this type.)

podName (string, optional)
PodName is the name of the driver pod that the user creates. This is used for the in-cluster client mode in which the user creates a client pod where the driver of the user application runs. It is an error to set this field if Mode is not in-cluster-client.

coreRequest (string, optional)
CoreRequest is the physical CPU core request for the driver. Maps to spark.kubernetes.driver.request.cores, which is available since Spark 3.0.

javaOptions (string, optional)
JavaOptions is a string of extra JVM options to pass to the driver, for instance GC settings or other logging options.

lifecycle (Kubernetes core/v1.Lifecycle, optional)
Lifecycle for running preStop or postStart commands.

kubernetesMaster (string, optional)
KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and other Kubernetes resources. Defaults to https://kubernetes.default.svc.

serviceAnnotations (map[string]string, optional)
ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by executors to connect to the driver.

serviceLabels (map[string]string, optional)
ServiceLabels defines the labels to be added to the Kubernetes headless service used by executors to connect to the driver.

ports ([]Port, optional)
Ports settings for the pods, following the Kubernetes specifications.

PrometheusSpec

(Appears on: MonitoringSpec)

PrometheusSpec defines the Prometheus specification when Prometheus is to be used for collecting and exposing metrics.

jmxExporterJar (string)
JmxExporterJar is the path to the Prometheus JMX exporter jar in the container.

port (int32, optional)
Port is the port of the HTTP server run by the Prometheus JMX exporter. If not specified, 8090 will be used as the default.

portName (string, optional)
PortName is the port name of the Prometheus JMX exporter port. If not specified, jmx-exporter will be used as the default.

configFile (string, optional)
ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. ConfigFile takes precedence over Configuration.

configuration (string, optional)
Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. If not specified, the content in spark-docker/conf/prometheus.yaml will be used. Configuration has no effect if ConfigFile is set.

RestartPolicy

(Appears on: SparkApplicationSpec)

RestartPolicy is the policy governing whether, and under which conditions, the controller should restart a terminated application. This completely defines the actions to be taken on any kind of failure during an application run.

type (RestartPolicyType)
Type specifies the RestartPolicyType.

onSubmissionFailureRetries (int32, optional)
OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. This is best effort and actual retry attempts can be >= the value specified due to caching. These are required if RestartPolicy is OnFailure.

onFailureRetries (int32, optional)
OnFailureRetries is the number of times to retry running an application before giving up.

onSubmissionFailureRetryInterval (int64, optional)
OnSubmissionFailureRetryInterval is the interval in seconds between retries on failed submissions.

onFailureRetryInterval (int64, optional)
OnFailureRetryInterval is the interval in seconds between retries on failed runs.
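A restart policy that retries both failed runs and failed submissions looks like this (the retry counts and intervals come from the old spark-py-pi.yaml example removed by this patch):

    restartPolicy:
      type: OnFailure
      onFailureRetries: 3
      onFailureRetryInterval: 10
      onSubmissionFailureRetries: 5
      onSubmissionFailureRetryInterval: 20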

DriverState (string alias)

DriverState tells the current state of a spark driver.

Value: "COMPLETED", "FAILED", "PENDING", "RUNNING", "UNKNOWN"

RestartPolicyType (string alias)

(Appears on: RestartPolicy)

Value: "Always", "Never", "OnFailure"

ScheduleState (string alias)

(Appears on: ScheduledSparkApplicationStatus)

Value: "", "FailedValidation", "Scheduled", "Validating"

DynamicAllocation

(Appears on: SparkApplicationSpec)

DynamicAllocation contains configuration options for dynamic allocation.

enabled (bool)
Enabled controls whether dynamic allocation is enabled or not.

initialExecutors (int32, optional)
InitialExecutors is the initial number of executors to request. If .spec.executor.instances is also set, the initial number of executors is set to the bigger of that and this option.

minExecutors (int32, optional)
MinExecutors is the lower bound for the number of executors if dynamic allocation is enabled.

maxExecutors (int32, optional)
MaxExecutors is the upper bound for the number of executors if dynamic allocation is enabled.

shuffleTrackingTimeout (int64, optional)
ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled).

ScheduledSparkApplication

ScheduledSparkApplication is the Schema for the scheduledsparkapplications API.

metadata (Kubernetes meta/v1.ObjectMeta)
Refer to the Kubernetes API documentation for the fields of the metadata field.

spec (ScheduledSparkApplicationSpec)

    schedule (string)
    Schedule is a cron schedule on which the application should run.

    template (SparkApplicationSpec)
    Template is a template from which SparkApplication instances can be created.

    suspend (bool, optional)
    Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. Defaults to false.

    concurrencyPolicy (ConcurrencyPolicy)
    ConcurrencyPolicy is the policy governing concurrent SparkApplication runs.

    successfulRunHistoryLimit (int32, optional)
    SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. Defaults to 1.

    failedRunHistoryLimit (int32, optional)
    FailedRunHistoryLimit is the number of past failed runs of the application to keep. Defaults to 1.

status (ScheduledSparkApplicationStatus)
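The examples/spark-pi-scheduled.yaml manifest updated by this patch is a compact illustration (template trimmed here):

    apiVersion: sparkoperator.k8s.io/v1beta2
    kind: ScheduledSparkApplication
    metadata:
      name: spark-pi-scheduled
      namespace: default
    spec:
      schedule: "@every 3m"
      concurrencyPolicy: Allow
      template:
        type: Scala
        mode: cluster
        image: spark:3.5.0
        mainClass: org.apache.spark.examples.SparkPi
        mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar
        sparkVersion: 3.5.0
        restartPolicy:
          type: Never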

ExecutorSpec

(Appears on: SparkApplicationSpec)

ExecutorSpec is the specification of the executor.

SparkPodSpec
(Members of SparkPodSpec are embedded into this type.)

instances (int32, optional)
Instances is the number of executor instances.

coreRequest (string, optional)
CoreRequest is the physical CPU core request for the executors. Maps to spark.kubernetes.executor.request.cores, which is available since Spark 2.4.

javaOptions (string, optional)
JavaOptions is a string of extra JVM options to pass to the executors, for instance GC settings or other logging options.

lifecycle (Kubernetes core/v1.Lifecycle, optional)
Lifecycle for running preStop or postStart commands.

deleteOnTermination (bool, optional)
DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination. Maps to spark.kubernetes.executor.deleteOnTermination, which is available since Spark 3.0.

ports ([]Port, optional)
Ports settings for the pods, following the Kubernetes specifications.

ScheduledSparkApplicationSpec

(Appears on: ScheduledSparkApplication)

ScheduledSparkApplicationSpec defines the desired state of ScheduledSparkApplication.

schedule (string)
Schedule is a cron schedule on which the application should run.

template (SparkApplicationSpec)
Template is a template from which SparkApplication instances can be created.

suspend (bool, optional)
Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. Defaults to false.

concurrencyPolicy (ConcurrencyPolicy)
ConcurrencyPolicy is the policy governing concurrent SparkApplication runs.

successfulRunHistoryLimit (int32, optional)
SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. Defaults to 1.

failedRunHistoryLimit (int32, optional)
FailedRunHistoryLimit is the number of past failed runs of the application to keep. Defaults to 1.

ExecutorState (string alias)

(Appears on: SparkApplicationStatus)

ExecutorState tells the current state of an executor.

Value: "COMPLETED", "FAILED", "PENDING", "RUNNING", "UNKNOWN"

ScheduledSparkApplicationStatus

(Appears on: ScheduledSparkApplication)

ScheduledSparkApplicationStatus defines the observed state of ScheduledSparkApplication.

lastRun (Kubernetes meta/v1.Time)
LastRun is the time when the last run of the application started.

nextRun (Kubernetes meta/v1.Time)
NextRun is the time when the next run of the application will start.

lastRunName (string)
LastRunName is the name of the SparkApplication for the most recent run of the application.

pastSuccessfulRunNames ([]string)
PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs.

pastFailedRunNames ([]string)
PastFailedRunNames keeps the names of SparkApplications for past failed runs.

scheduleState (ScheduleState)
ScheduleState is the current scheduling state of the application.

reason (string)
Reason tells why the ScheduledSparkApplication is in the particular ScheduleState.

GPUSpec

(Appears on: SparkPodSpec)

name (string)
Name is the GPU resource name, such as nvidia.com/gpu or amd.com/gpu.

quantity (int64)
Quantity is the number of GPUs to request for driver or executor.
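A GPU request on the driver or executor pod spec then looks roughly like this (a sketch; the gpu field name on the pod spec is assumed from the operator's user guide, and the quantity is arbitrary):

    driver:
      gpu:
        name: nvidia.com/gpu
        quantity: 1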

SecretInfo

(Appears on: SparkPodSpec)

SecretInfo captures information of a secret.

name (string)

path (string)

secretType (SecretType)

SecretType (string alias)

(Appears on: SecretInfo)

SecretType tells the type of a secret.

"GCPServiceAccount"
SecretTypeGCPServiceAccount is for secrets from a GCP service account JSON key file that needs the environment variable GOOGLE_APPLICATION_CREDENTIALS.

"Generic"
SecretTypeGeneric is for secrets that need no special handling.

"HadoopDelegationToken"
SecretTypeHadoopDelegationToken is for secrets from a Hadoop delegation token that needs the environment variable HADOOP_TOKEN_FILE_LOCATION.
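Mounting such a secret on the driver would look roughly like this (a sketch; the secret name and mount path are hypothetical, and the secrets field on the pod spec is assumed from the operator's user guide):

    driver:
      secrets:
        - name: gcp-svc-account
          path: /mnt/secrets
          secretType: GCPServiceAccount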

SparkApplication

SparkApplication is the Schema for the sparkapplications API.

metadata (Kubernetes meta/v1.ObjectMeta)
Refer to the Kubernetes API documentation for the fields of the metadata field.

spec (SparkApplicationSpec)

    type (SparkApplicationType)
    Type tells the type of the Spark application.

    sparkVersion (string)
    SparkVersion is the version of Spark the application uses.

    mode (DeployMode)
    Mode is the deployment mode of the Spark application.

    proxyUser (string, optional)
    ProxyUser specifies the user to impersonate when submitting the application. It maps to the command-line flag "--proxy-user" in spark-submit.

    image (string, optional)
    Image is the container image for the driver, executor, and init-container. Any custom container image for the driver, executor, or init-container takes precedence over this.

    imagePullPolicy (string, optional)
    ImagePullPolicy is the image pull policy for the driver, executor, and init-container.

    imagePullSecrets ([]string, optional)
    ImagePullSecrets is the list of image-pull secrets.

    mainClass (string, optional)
    MainClass is the fully-qualified main class of the Spark application. This only applies to Java/Scala Spark applications.

    mainApplicationFile (string, optional)
    MainFile is the path to a bundled JAR, Python, or R file of the application.

    arguments ([]string, optional)
    Arguments is a list of arguments to be passed to the application.

    sparkConf (map[string]string, optional)
    SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in spark-submit.

    hadoopConf (map[string]string, optional)
    HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option in spark-submit. The SparkApplication controller automatically adds the prefix "spark.hadoop." to Hadoop configuration properties.

    sparkConfigMap (string, optional)
    SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties. The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to.

    hadoopConfigMap (string, optional)
    HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to.

    volumes ([]Kubernetes core/v1.Volume, optional)
    Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors.

    driver (DriverSpec)
    Driver is the driver specification.

    executor (ExecutorSpec)
    Executor is the executor specification.

    deps (Dependencies, optional)
    Deps captures all possible types of dependencies of a Spark application.

    restartPolicy (RestartPolicy)
    RestartPolicy defines the policy on if and in which conditions the controller should restart an application.

    nodeSelector (map[string]string, optional)
    NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). This field will be deprecated in future versions (at SparkApplicationSpec level).

    failureRetries (int32, optional)
    FailureRetries is the number of times to retry a failed application before giving up. This is best effort and actual retry attempts can be >= the value specified.

    retryInterval (int64, optional)
    RetryInterval is the unit of intervals in seconds between submission retries.

    pythonVersion (string, optional)
    This sets the major Python version of the Docker image used to run the driver and executor containers. Can be either 2 or 3; defaults to 2.

    memoryOverheadFactor (string, optional)
    This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. For JVM-based jobs this value will default to 0.10, for non-JVM jobs to 0.40. The value of this field will be overridden by Spec.Driver.MemoryOverhead and Spec.Executor.MemoryOverhead if they are set.

    monitoring (MonitoringSpec, optional)
    Monitoring configures how monitoring is handled.

    batchScheduler (string, optional)
    BatchScheduler configures which batch scheduler will be used for scheduling.

    timeToLiveSeconds (int64, optional)
    TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication after its termination. The SparkApplication object will be garbage collected if the current time is more than the TimeToLiveSeconds since its termination.

    batchSchedulerOptions (BatchSchedulerConfiguration, optional)
    BatchSchedulerOptions provides fine-grained control over how batch scheduling is done.

    sparkUIOptions (SparkUIConfiguration, optional)
    SparkUIOptions allows configuring the Service and the Ingress used to expose the Spark UI.

    driverIngressOptions ([]DriverIngressConfiguration, optional)
    DriverIngressOptions allows configuring the Service and the Ingress used to expose ports inside the Spark driver.

    dynamicAllocation (DynamicAllocation, optional)
    DynamicAllocation configures dynamic allocation, which becomes available for the Kubernetes scheduler backend since Spark 3.0.

status (SparkApplicationStatus)
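Dynamic allocation in practice is shown by the examples/spark-pi-dynamic-allocation.yaml manifest added in this patch:

    spec:
      dynamicAllocation:
        enabled: true
        initialExecutors: 2
        minExecutors: 1
        maxExecutors: 5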

SparkApplicationSpec

(Appears on: ScheduledSparkApplicationSpec, SparkApplication)

SparkApplicationSpec defines the desired state of SparkApplication. It carries every piece of information a spark-submit command takes and recognizes.
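For a complete, minimal SparkApplication that exercises these fields, see examples/spark-pi.yaml as rewritten by this patch; its spec boils down to (labels omitted here):

    spec:
      type: Scala
      mode: cluster
      image: spark:3.5.0
      imagePullPolicy: IfNotPresent
      mainClass: org.apache.spark.examples.SparkPi
      mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar
      sparkVersion: 3.5.0
      driver:
        cores: 1
        coreLimit: 1200m
        memory: 512m
        serviceAccount: spark-operator-spark
      executor:
        instances: 1
        cores: 1
        memory: 512m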

SparkApplicationStatus

(Appears on: SparkApplication)

SparkApplicationStatus defines the observed state of SparkApplication.

[The remaining hunks of this file carry only link, import-path, and typo fixes: cross-reference links move from the Kubernetes v1.29 API documentation to v1.30; field types such as executorState and envSecretKeyRefs switch from github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 to github.com/kubeflow/spark-operator/api/v1beta2; and "ServiceLables" is corrected to "ServiceLabels" in the serviceLabels field description.]

Generated with gen-crd-api-reference-docs.

    diff --git a/entrypoint.sh b/entrypoint.sh index f3c83ebad..0ca873012 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -4,21 +4,18 @@ set -ex # Check whether there is a passwd entry for the container UID -myuid=$(id -u) -mygid=$(id -g) +uid=$(id -u) +gid=$(id -g) + # turn off -e for getent because it will return error code in anonymous uid case set +e -uidentry=$(getent passwd $myuid) +uidentry=$(getent passwd $uid) set -e -echo $myuid -echo $mygid -echo $uidentry - # If there is no passwd entry for the container UID, attempt to create one if [[ -z "$uidentry" ]] ; then if [[ -w /etc/passwd ]] ; then - echo "$myuid:x:$myuid:$mygid:anonymous uid:$SPARK_HOME:/bin/false" >> /etc/passwd + echo "$uid:x:$uid:$gid:anonymous uid:$SPARK_HOME:/bin/false" >> /etc/passwd else echo "Container ENTRYPOINT failed to add passwd entry for anonymous UID" fi diff --git a/examples/spark-operator-with-metrics.yaml b/examples/spark-operator-with-metrics.yaml deleted file mode 100644 index 3513b506a..000000000 --- a/examples/spark-operator-with-metrics.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - strategy: - type: Recreate - template: - metadata: - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "10254" - prometheus.io/path: "/metrics" - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - containers: - - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: Always - ports: - - containerPort: 10254 - args: - - -logtostderr - - -enable-metrics=true - - -metrics-labels=app_type \ No newline at end of file diff --git a/examples/spark-operator-with-webhook.yaml b/examples/spark-operator-with-webhook.yaml deleted file mode 100644 index 25fa81d5e..000000000 --- a/examples/spark-operator-with-webhook.yaml +++ /dev/null @@ -1,94 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - strategy: - type: Recreate - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - volumes: - - name: webhook-certs - secret: - secretName: spark-webhook-certs - containers: - - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: Always - volumeMounts: - - name: webhook-certs - mountPath: /etc/webhook-certs - ports: - - containerPort: 8080 - args: - - -logtostderr - - -enable-webhook=true - - -v=2 ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: sparkoperator-init - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - backoffLimit: 3 - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - restartPolicy: Never - containers: - - name: main - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: IfNotPresent - command: ["/usr/bin/gencerts.sh", "-p"] ---- -kind: Service -apiVersion: v1 -metadata: - name: spark-webhook - namespace: spark-operator -spec: - ports: - - port: 443 - targetPort: 8080 - name: webhook - selector: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 diff --git a/examples/spark-pi-configmap.yaml b/examples/spark-pi-configmap.yaml index a6a5dc023..e2b4bc556 100644 --- a/examples/spark-pi-configmap.yaml +++ b/examples/spark-pi-configmap.yaml @@ -13,41 +13,41 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: - name: spark-pi + name: spark-pi-configmap namespace: default spec: type: Scala mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkVersion: "3.5.0" + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 restartPolicy: type: Never volumes: - - name: config-vol - configMap: - name: dummy-cm + - name: config-vol + configMap: + name: test-configmap driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark volumeMounts: - - name: config-vol - mountPath: /opt/spark/mycm + - name: config-vol + mountPath: /opt/spark/config executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 + instances: 1 + cores: 1 + memory: 512m volumeMounts: - - name: config-vol - mountPath: /opt/spark/mycm + - name: config-vol + mountPath: /opt/spark/config diff --git a/examples/spark-pi-custom-resource.yaml b/examples/spark-pi-custom-resource.yaml index 1e70098d2..83df405e1 100644 --- a/examples/spark-pi-custom-resource.yaml +++ b/examples/spark-pi-custom-resource.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: name: spark-pi-custom-resource @@ -21,38 +21,34 @@ metadata: spec: type: Scala mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkVersion: "3.5.0" + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 restartPolicy: type: Never volumes: - - name: "test-volume" - hostPath: - path: "/tmp" - type: Directory + - name: test-volume + hostPath: + path: /tmp + type: Directory driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark volumeMounts: - - name: "test-volume" - mountPath: "/tmp" + - name: test-volume + mountPath: /tmp executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 + instances: 1 + cores: 1 + memory: 512m volumeMounts: - - name: "test-volume" - mountPath: "/tmp" - batchSchedulerOptions: - resources: - cpu: "2" - memory: "4096m" + - name: test-volume + mountPath: /tmp diff --git a/examples/spark-pi-dynamic-allocation.yaml b/examples/spark-pi-dynamic-allocation.yaml new file mode 100644 index 000000000..800313914 --- /dev/null +++ b/examples/spark-pi-dynamic-allocation.yaml @@ -0,0 +1,49 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + name: spark-pi-dynamic-allocation + namespace: default +spec: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + arguments: + - "50000" + driver: + labels: + version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m + dynamicAllocation: + enabled: true + initialExecutors: 2 + maxExecutors: 5 + minExecutors: 1 diff --git a/examples/spark-pi-prometheus.yaml b/examples/spark-pi-prometheus.yaml index b47de1db6..29a447061 100644 --- a/examples/spark-pi-prometheus.yaml +++ b/examples/spark-pi-prometheus.yaml @@ -14,7 +14,7 @@ # limitations under the License. # -apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: name: spark-pi @@ -22,31 +22,31 @@ metadata: spec: type: Scala mode: cluster - image: "gcr.io/spark-operator/spark:v3.1.1-gcs-prometheus" + image: gcr.io/spark-operator/spark:v3.1.1-gcs-prometheus imagePullPolicy: Always mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar" + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar arguments: - - "100000" - sparkVersion: "3.1.1" + - "100000" + sparkVersion: 3.1.1 restartPolicy: type: Never driver: cores: 1 - coreLimit: "1200m" - memory: "512m" + coreLimit: 1200m + memory: 512m labels: version: 3.1.1 serviceAccount: spark-operator-spark executor: cores: 1 instances: 1 - memory: "512m" + memory: 512m labels: version: 3.1.1 monitoring: exposeDriverMetrics: true exposeExecutorMetrics: true prometheus: - jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar" + jmxExporterJar: /prometheus/jmx_prometheus_javaagent-0.11.0.jar port: 8090 diff --git a/examples/spark-py-pi.yaml b/examples/spark-pi-python.yaml similarity index 72% rename from examples/spark-py-pi.yaml rename to examples/spark-pi-python.yaml index 11a193cfd..5d0a7f273 100644 --- a/examples/spark-py-pi.yaml +++ b/examples/spark-pi-python.yaml @@ -13,35 +13,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: - name: pyspark-pi + name: spark-pi-python namespace: default spec: type: Python pythonVersion: "3" mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainApplicationFile: local:///opt/spark/examples/src/main/python/pi.py - sparkVersion: "3.5.0" - restartPolicy: - type: OnFailure - onFailureRetries: 3 - onFailureRetryInterval: 10 - onSubmissionFailureRetries: 5 - onSubmissionFailureRetryInterval: 20 + sparkVersion: 3.5.0 driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m diff --git a/examples/spark-pi-schedule.yaml b/examples/spark-pi-scheduled.yaml similarity index 76% rename from examples/spark-pi-schedule.yaml rename to examples/spark-pi-scheduled.yaml index 576a77361..f74143e7c 100644 --- a/examples/spark-pi-schedule.yaml +++ b/examples/spark-pi-scheduled.yaml @@ -14,34 +14,35 @@ # limitations under the License. # -apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: ScheduledSparkApplication metadata: name: spark-pi-scheduled namespace: default spec: - schedule: "@every 5m" + schedule: "@every 3m" concurrencyPolicy: Allow template: type: Scala mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkVersion: "3.5.0" + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 restartPolicy: type: Never driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m diff --git a/examples/spark-pi-volcano.yaml b/examples/spark-pi-volcano.yaml new file mode 100644 index 000000000..277ed173d --- /dev/null +++ b/examples/spark-pi-volcano.yaml @@ -0,0 +1,43 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + name: spark-pi-volcano + namespace: default +spec: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + driver: + labels: + version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 2 + cores: 1 + coreLimit: 1200m + memory: 512m + batchScheduler: volcano diff --git a/examples/spark-pi.yaml b/examples/spark-pi.yaml index 41d48645e..6d7ae6869 100644 --- a/examples/spark-pi.yaml +++ b/examples/spark-pi.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: name: spark-pi @@ -21,37 +21,22 @@ metadata: spec: type: Scala mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkVersion: "3.5.0" - sparkUIOptions: - serviceLabels: - test-label/v1: 'true' - restartPolicy: - type: Never - volumes: - - name: "test-volume" - hostPath: - path: "/tmp" - type: Directory + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark - volumeMounts: - - name: "test-volume" - mountPath: "/tmp" executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 - volumeMounts: - - name: "test-volume" - mountPath: "/tmp" + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m diff --git a/go.mod b/go.mod index f44232d71..72c1d2548 100644 --- a/go.mod +++ b/go.mod @@ -1,134 +1,217 @@ module github.com/kubeflow/spark-operator -go 1.22 +go 1.22.5 require ( - cloud.google.com/go/storage v1.40.0 - github.com/aws/aws-sdk-go-v2 v1.26.1 - github.com/aws/aws-sdk-go-v2/config v1.27.11 - github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 - github.com/evanphx/json-patch v5.9.0+incompatible - github.com/golang/glog v1.2.1 + cloud.google.com/go/storage v1.43.0 + github.com/aws/aws-sdk-go-v2 v1.30.3 + github.com/aws/aws-sdk-go-v2/config v1.27.26 + github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 + github.com/golang/glog v1.2.2 github.com/google/uuid v1.6.0 github.com/olekukonko/tablewriter v0.0.5 - github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/client_model v0.6.1 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 + github.com/prometheus/client_golang v1.19.1 github.com/robfig/cron/v3 v3.0.1 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 + go.uber.org/zap v1.27.0 gocloud.dev v0.37.0 - golang.org/x/net v0.24.0 - golang.org/x/sync v0.7.0 - golang.org/x/time v0.5.0 - k8s.io/api v0.29.3 - k8s.io/apiextensions-apiserver v0.29.3 - k8s.io/apimachinery v0.29.3 + golang.org/x/net v0.27.0 + helm.sh/helm/v3 v3.15.3 + k8s.io/api v0.30.2 + k8s.io/apiextensions-apiserver v0.30.2 + k8s.io/apimachinery v0.30.2 k8s.io/client-go v1.5.2 - 
k8s.io/kubectl v0.29.3 - k8s.io/kubernetes v1.29.3 - k8s.io/utils v0.0.0-20240310230437-4693a0247e57 - volcano.sh/apis v1.8.2 + k8s.io/kubernetes v1.30.2 + k8s.io/utils v0.0.0-20240710235135-d4aae2beeffc + sigs.k8s.io/controller-runtime v0.17.5 + volcano.sh/apis v1.9.0 ) require ( - cloud.google.com/go v0.112.2 // indirect - cloud.google.com/go/compute v1.25.1 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.7 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/aws/aws-sdk-go v1.51.16 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.7.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.1.11 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/hcsshim v0.12.4 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go v1.54.18 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.26 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect - github.com/aws/smithy-go v1.20.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // 
indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect + github.com/aws/smithy-go v1.20.3 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.12.0 // indirect - github.com/fatih/camelcase v1.0.0 // indirect + github.com/chai2010/gettext-go v1.0.3 // indirect + github.com/containerd/containerd v1.7.19 // indirect + github.com/containerd/errdefs v0.1.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cyphar/filepath-securejoin v0.2.5 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v27.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v27.0.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-metrics v0.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-errors/errors v1.4.2 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-errors/errors v1.5.1 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.2 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/wire v0.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.3 // indirect - github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect + github.com/googleapis/gax-go/v2 v2.12.5 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/imdario/mergo 
v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/locker v1.0.1 // indirect + github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.52.2 // indirect - github.com/prometheus/procfs v0.13.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/rubenv/sql-migrate v1.7.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.6.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect - go.opentelemetry.io/otel v1.25.0 // indirect - go.opentelemetry.io/otel/metric v1.25.0 // indirect - go.opentelemetry.io/otel/trace v1.25.0 // indirect - 
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/crypto v0.22.0 // indirect - golang.org/x/oauth2 v0.19.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.starlark.net v0.0.0-20240705175910-70002002b310 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/exp v0.0.0-20240707233637-46b078467d37 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/term v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.23.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/api v0.172.0 // indirect - google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/grpc v1.63.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/api v0.188.0 // indirect + google.golang.org/genproto v0.0.0-20240709173604-40e1e62336c5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240709173604-40e1e62336c5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cli-runtime v0.29.3 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect + k8s.io/apiserver v0.30.2 // indirect + k8s.io/cli-runtime v0.30.2 // indirect + k8s.io/component-base v0.30.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f // indirect + k8s.io/kubectl v0.30.2 // indirect + oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 774d575a5..a5bb11dd5 100644 --- a/go.sum +++ b/go.sum @@ -1,112 +1,216 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= -cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= -cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= -cloud.google.com/go/compute 
v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= -cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= -cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= -cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts= +cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= +cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= +cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/iam v1.1.11 h1:0mQ8UKSfdHLut6pH9FM3bI55KWR46ketn0PuXleDyxw= +cloud.google.com/go/iam v1.1.11/go.mod h1:biXoiLWYIKntto2joP+62sd9uW5EpkZmKIvfNcTWlnQ= +cloud.google.com/go/longrunning v0.5.9 h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXXdBo5k= +cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= 
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.12.4 h1:Ev7YUMHAHoWNm+aDSPzc5W9s6E2jyL1szpVDJeZ/Rr4= +github.com/Microsoft/hcsshim v0.12.4/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aws/aws-sdk-go v1.51.16 h1:vnWKK8KjbftEkuPX8bRj3WHsLy1uhotn0eXptpvrxJI= -github.com/aws/aws-sdk-go v1.51.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= -github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= -github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= -github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod 
h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.54.18 h1:t8DGtN8A2wEiazoJxeDbfPsbxCKtjoRLuO7jBSgJzo4= +github.com/aws/aws-sdk-go v1.54.18/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= +github.com/aws/aws-sdk-go-v2/config v1.27.26 h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= +github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 h1:kNemAUX+bJFBSfPkGVZ8HFOKIadjLoI2Ua1ZKivhGSo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7/go.mod h1:71S2C1g/Zjn+ANmyoOqJ586OrPF9uC9iiHt9ZAT+MOw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 
h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= -github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= -github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= 
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80= +github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= +github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/containerd/containerd v1.7.19 h1:/xQ4XRJ0tamDkdzrrBAUy/LE5nCcxFKdBm4EcPrSMEE= +github.com/containerd/containerd v1.7.19/go.mod h1:h4FtNYUUMB4Phr6v+xG89RYKj9XccvbNSCKjdufCrkc= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= +github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= 
+github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= +github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= -github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v27.0.3+incompatible h1:usGs0/BoBW8MWxGeEtqPMkzOY56jZ6kYlSN5BLDioCQ= +github.com/docker/cli v27.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= +github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= 
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -117,8 +221,10 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree 
v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -126,7 +232,6 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -138,15 +243,16 @@ github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0 h1:e+8XbKB6IMn8A4OAyZccO4pYfB3s7bt6azNIPE7AnPg= +github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -154,11 +260,31 @@ github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.3 
h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q= -github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -167,59 +293,125 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod 
h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/moby/spdystream v0.2.0 
h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= 
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= -github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= -github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= -github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -227,57 +419,109 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= +github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= +github.com/sagikazarmark/locafero v0.6.0/go.mod 
h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 h1:zvpPXY7RfYAGSdYQLjp6zxdJNSYD/+FFoCTQN9IPxBs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0/go.mod h1:BMn8NB1vsxTljvuorms2hyOs8IBuuBEq0pl7ltOfy30= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8= -go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= -go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= -go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= -go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= -go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= -go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= -go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= -go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= -go.starlark.net 
v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= -go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.starlark.net v0.0.0-20240705175910-70002002b310 h1:tEAOMoNmN2MqVNi0MMEWpTtPI4YNCXgxmAGtuv3mST0= +go.starlark.net v0.0.0-20240705175910-70002002b310/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240707233637-46b078467d37 
h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w= +golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -289,26 +533,30 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -319,8 +567,11 @@ golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -328,29 +579,34 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.16.0/go.mod 
h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -365,34 +621,36 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= -google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= +google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda h1:b6F6WIV4xHHD0FA4oIyzU6mHWg2WI2X1RBehwa5QN38= -google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240709173604-40e1e62336c5 h1:ORprMx6Xqr56pGwKXMnVEFBI0k7OIcHI0Rx92/rKypo= +google.golang.org/genproto v0.0.0-20240709173604-40e1e62336c5/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= +google.golang.org/genproto/googleapis/api v0.0.0-20240709173604-40e1e62336c5 h1:a/Z0jgw03aJ2rQnp5PlPpznJqJft0HyvyrcUcxgzPwY= +google.golang.org/genproto/googleapis/api v0.0.0-20240709173604-40e1e62336c5/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 h1:SbSDUWW1PAO24TNpLdeheoYPd7kllICcLU52x6eD4kQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= -google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -402,19 +660,31 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +helm.sh/helm/v3 v3.15.3 h1:HcZDaVFe9uHa6hpsR54mJjYyRy4uz/pc6csg27nxFOc= +helm.sh/helm/v3 v3.15.3/go.mod h1:FzSIP8jDQaa6WAVg9F+OkKz7J0ZmAga4MABtTbsb9WQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= @@ -423,29 +693,37 @@ k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= +k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 h1:w6nThEmGo9zcL+xH1Tu6pjxJ3K1jXFW+V0u4peqN8ks= -k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= 
+k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= +k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f h1:2sXuKesAYbRHxL3aE2PN6zX/gcJr22cjrsej+W784Tc= +k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= -k8s.io/kubernetes v1.29.3 h1:EuOAKN4zpiP+kBx/0e9yS5iBkPSyLml19juOqZxBtDw= -k8s.io/kubernetes v1.29.3/go.mod h1:CP+Z+S9haxyB7J+nV6ywYry4dqlphArPXjcc0CsBVXc= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kubernetes v1.30.2 h1:11WhS78OYX/lnSy6TXxPO6Hk+E5K9ZNrEsk9JgMSX8I= +k8s.io/kubernetes v1.30.2/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0= +k8s.io/utils v0.0.0-20240710235135-d4aae2beeffc h1:sAWhW/i0Lsz5ZUgeE9svkFa4UyoA+LNAsPcWnwQ2PzM= +k8s.io/utils v0.0.0-20240710235135-d4aae2beeffc/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= +oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw= +sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -volcano.sh/apis v1.8.2 h1:MJ1EXpdQeKG+XEhb/I3liWgMFzkgW3qCcj6qdhTuvfA= -volcano.sh/apis v1.8.2/go.mod h1:h+xbUpkjfRaHjktAi8h+7JNnNahjwhRSgpN9FUUwNXQ= +volcano.sh/apis v1.9.0 h1:e+9yEbQOi6HvgaayAxYULT6n+59mkYvmqjKhp9Z06sY= +volcano.sh/apis v1.9.0/go.mod h1:yXNfsZRzAOq6EUyPJYFrlMorh1XsYQGonGWyr4IiznM= diff --git a/hack/api-docs/Dockerfile b/hack/api-docs/Dockerfile deleted file mode 100644 index 15de4a02f..000000000 --- a/hack/api-docs/Dockerfile 
+++ /dev/null @@ -1,18 +0,0 @@ -# -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -FROM golang:1.22.2-alpine -RUN go install github.com/ahmetb/gen-crd-api-reference-docs@latest diff --git a/hack/api-docs/api-docs-config.json b/hack/api-docs/api-docs-config.json deleted file mode 100644 index e8dafceab..000000000 --- a/hack/api-docs/api-docs-config.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "hideMemberFields": [ - "TypeMeta" - ], - "hideTypePatterns": [ - "ParseError$", - "List$" - ], - "externalPackages": [ - { - "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$", - "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" - }, - { - "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/", - "docsURLTemplate": "https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}" - }, - { - "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/", - "docsURLTemplate": "https://pkg.go.dev/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}" - } - ], - "typeDisplayNamePrefixOverrides": { - "k8s.io/api/": "Kubernetes ", - "k8s.io/apimachinery/pkg/apis/": "Kubernetes " - }, - "markdownDisabled": false -} \ No newline at end of file diff --git a/hack/api-docs/config.json b/hack/api-docs/config.json new file mode 100644 index 000000000..bdeefef44 --- /dev/null +++ b/hack/api-docs/config.json @@ -0,0 +1,28 @@ +{ + "hideMemberFields": [ + "TypeMeta" + ], + "hideTypePatterns": [ + "ParseError$", + "List$" + ], + "externalPackages": [ + { + "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$", + "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" + }, + { + "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/", + "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}" + }, + { + "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/", + "docsURLTemplate": "https://pkg.go.dev/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}" + } + ], + "typeDisplayNamePrefixOverrides": { + "k8s.io/api/": "Kubernetes ", + "k8s.io/apimachinery/pkg/apis/": "Kubernetes " + }, + "markdownDisabled": false +} \ No newline at end of file diff --git a/hack/api-docs/api-docs-template/members.tpl b/hack/api-docs/template/members.tpl similarity index 100% rename from hack/api-docs/api-docs-template/members.tpl rename to hack/api-docs/template/members.tpl diff --git a/hack/api-docs/api-docs-template/pkg.tpl b/hack/api-docs/template/pkg.tpl similarity index 85% rename from hack/api-docs/api-docs-template/pkg.tpl rename to hack/api-docs/template/pkg.tpl index b03885602..45afec04c 100644 --- a/hack/api-docs/api-docs-template/pkg.tpl +++ b/hack/api-docs/template/pkg.tpl @@ -42,7 +42,7 @@ {{ end }}

    - Generated with https://github.com/ahmetb/gen-crd-api-reference-docs.git on git commit ccf856504caaeac38151b57a950d3f8a7942b9db. + Generated with gen-crd-api-reference-docs.

    {{ end }} \ No newline at end of file diff --git a/hack/api-docs/api-docs-template/placeholder.go b/hack/api-docs/template/placeholder.go similarity index 100% rename from hack/api-docs/api-docs-template/placeholder.go rename to hack/api-docs/template/placeholder.go diff --git a/hack/api-docs/api-docs-template/type.tpl b/hack/api-docs/template/type.tpl similarity index 100% rename from hack/api-docs/api-docs-template/type.tpl rename to hack/api-docs/template/type.tpl diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 000000000..c4bd8c393 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/pkg/controller/doc.go b/internal/controller/doc.go similarity index 95% rename from pkg/controller/doc.go rename to internal/controller/doc.go index b1992cd8e..f83ab071d 100644 --- a/pkg/controller/doc.go +++ b/internal/controller/doc.go @@ -1,11 +1,11 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/internal/controller/mutatingwebhookconfiguration/controller.go b/internal/controller/mutatingwebhookconfiguration/controller.go new file mode 100644 index 000000000..946a57841 --- /dev/null +++ b/internal/controller/mutatingwebhookconfiguration/controller.go @@ -0,0 +1,99 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutatingwebhookconfiguration + +import ( + "context" + "fmt" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/pkg/certificate" +) + +var ( + logger = ctrl.Log.WithName("") +) + +// Reconciler reconciles a webhook configuration object. 
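+// It watches the named MutatingWebhookConfiguration and rewrites the CA
+// bundle of every webhook entry whenever the configuration changes, keeping
+// it in sync with the certificate provider. A minimal wiring sketch with
+// hypothetical variable names (not part of this patch):
+//
+//	r := NewReconciler(mgr.GetClient(), certProvider, webhookConfigName)
+//	if err := r.SetupWithManager(mgr, controller.Options{}); err != nil {
+//		// handle setup error
+//	}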
+type Reconciler struct {
+	client       client.Client
+	certProvider *certificate.Provider
+	name         string
+}
+
+// Reconciler implements reconcile.Reconciler.
+var _ reconcile.Reconciler = &Reconciler{}
+
+// NewReconciler creates a new Reconciler instance.
+func NewReconciler(client client.Client, certProvider *certificate.Provider, name string) *Reconciler {
+	return &Reconciler{
+		client:       client,
+		certProvider: certProvider,
+		name:         name,
+	}
+}
+
+func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		Named("mutating-webhook-configuration-controller").
+		Watches(
+			&admissionregistrationv1.MutatingWebhookConfiguration{},
+			NewEventHandler(),
+			builder.WithPredicates(
+				NewEventFilter(r.name),
+			),
+		).
+		WithOptions(options).
+		Complete(r)
+}
+
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger.Info("Updating CA bundle of MutatingWebhookConfiguration", "name", req.Name)
+	if err := r.updateMutatingWebhookConfiguration(ctx, req.NamespacedName); err != nil {
+		return ctrl.Result{Requeue: true}, err
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) updateMutatingWebhookConfiguration(ctx context.Context, key types.NamespacedName) error {
+	webhook := &admissionregistrationv1.MutatingWebhookConfiguration{}
+	if err := r.client.Get(ctx, key, webhook); err != nil {
+		return fmt.Errorf("failed to get mutating webhook configuration %v: %v", key, err)
+	}
+
+	caBundle, err := r.certProvider.CACert()
+	if err != nil {
+		return fmt.Errorf("failed to get CA certificate: %v", err)
+	}
+
+	newWebhook := webhook.DeepCopy()
+	for i := range newWebhook.Webhooks {
+		newWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
+	}
+	if err := r.client.Update(ctx, newWebhook); err != nil {
+		return fmt.Errorf("failed to update mutating webhook configuration %v: %v", key, err)
+	}
+
+	return nil
+}
diff --git a/internal/controller/mutatingwebhookconfiguration/event_filter.go b/internal/controller/mutatingwebhookconfiguration/event_filter.go
new file mode 100644
index 000000000..64131300b
--- /dev/null
+++ b/internal/controller/mutatingwebhookconfiguration/event_filter.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mutatingwebhookconfiguration
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+// EventFilter filters events for MutatingWebhookConfiguration.
+type EventFilter struct {
+	name string
+}
+
+func NewEventFilter(name string) *EventFilter {
+	return &EventFilter{
+		name: name,
+	}
+}
+
+// EventFilter implements predicate.Predicate.
+var _ predicate.Predicate = &EventFilter{}
+
+// Create implements predicate.Predicate.
+func (f *EventFilter) Create(e event.CreateEvent) bool {
+	return e.Object.GetName() == f.name
+}
+
+// Update implements predicate.Predicate.
+func (f *EventFilter) Update(e event.UpdateEvent) bool {
+	return e.ObjectOld.GetName() == f.name
+}
+
+// Delete implements predicate.Predicate.
+func (f *EventFilter) Delete(event.DeleteEvent) bool {
+	return false
+}
+
+// Generic implements predicate.Predicate.
+func (f *EventFilter) Generic(event.GenericEvent) bool {
+	return false
+}
diff --git a/internal/controller/mutatingwebhookconfiguration/event_handler.go b/internal/controller/mutatingwebhookconfiguration/event_handler.go
new file mode 100644
index 000000000..f9c883506
--- /dev/null
+++ b/internal/controller/mutatingwebhookconfiguration/event_handler.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mutatingwebhookconfiguration
+
+import (
+	"context"
+
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/workqueue"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+)
+
+// EventHandler handles MutatingWebhookConfiguration events.
+type EventHandler struct{}
+
+var _ handler.EventHandler = &EventHandler{}
+
+// NewEventHandler creates a new EventHandler instance.
+func NewEventHandler() *EventHandler {
+	return &EventHandler{}
+}
+
+// Create implements handler.EventHandler.
+func (h *EventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) {
+	mwc, ok := event.Object.(*admissionregistrationv1.MutatingWebhookConfiguration)
+	if !ok {
+		return
+	}
+	logger.Info("MutatingWebhookConfiguration created", "name", mwc.Name)
+	key := types.NamespacedName{
+		Namespace: mwc.Namespace,
+		Name:      mwc.Name,
+	}
+	queue.AddRateLimited(ctrl.Request{NamespacedName: key})
+}
+
+// Update implements handler.EventHandler.
+func (h *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) {
+	oldWebhook, ok := event.ObjectOld.(*admissionregistrationv1.MutatingWebhookConfiguration)
+	if !ok {
+		return
+	}
+	newWebhook, ok := event.ObjectNew.(*admissionregistrationv1.MutatingWebhookConfiguration)
+	if !ok {
+		return
+	}
+	if newWebhook.ResourceVersion == oldWebhook.ResourceVersion {
+		return
+	}
+
+	logger.Info("MutatingWebhookConfiguration updated", "name", newWebhook.Name, "namespace", newWebhook.Namespace)
+	key := types.NamespacedName{
+		Namespace: newWebhook.Namespace,
+		Name:      newWebhook.Name,
+	}
+	queue.AddRateLimited(ctrl.Request{NamespacedName: key})
+}
+
+// Delete implements handler.EventHandler.
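+// Note that delete events never reach this handler in practice, because the
+// EventFilter predicate above returns false for deletions; the method is kept
+// to satisfy the handler.EventHandler interface.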
+func (h *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + mwc, ok := event.Object.(*admissionregistrationv1.MutatingWebhookConfiguration) + if !ok { + return + } + logger.Info("MutatingWebhookConfiguration deleted", "name", mwc.Name, "namespace", mwc.Namespace) + key := types.NamespacedName{ + Namespace: mwc.Namespace, + Name: mwc.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} + +// Generic implements handler.EventHandler. +func (h *EventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + mwc, ok := event.Object.(*admissionregistrationv1.MutatingWebhookConfiguration) + if !ok { + return + } + logger.Info("MutatingWebhookConfiguration generic event", "name", mwc.Name, "namespace", mwc.Namespace) + key := types.NamespacedName{ + Namespace: mwc.Namespace, + Name: mwc.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} diff --git a/internal/controller/scheduledsparkapplication/controller.go b/internal/controller/scheduledsparkapplication/controller.go new file mode 100644 index 000000000..c8abb3af6 --- /dev/null +++ b/internal/controller/scheduledsparkapplication/controller.go @@ -0,0 +1,377 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheduledsparkapplication + +import ( + "context" + "fmt" + "reflect" + "sort" + "time" + + "github.com/robfig/cron/v3" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/utils/clock" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var ( + logger = log.Log.WithName("") +) + +type Options struct { + Namespaces []string +} + +// Reconciler reconciles a ScheduledSparkApplication object +type Reconciler struct { + scheme *runtime.Scheme + client client.Client + recorder record.EventRecorder + clock clock.Clock + options Options +} + +var _ reconcile.Reconciler = &Reconciler{} + +func NewReconciler( + scheme *runtime.Scheme, + client client.Client, + recorder record.EventRecorder, + clock clock.Clock, + options Options, +) *Reconciler { + return &Reconciler{ + scheme: scheme, + client: client, + recorder: recorder, + clock: clock, + options: options, + } +} + +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=scheduledsparkapplications,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=scheduledsparkapplications/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=scheduledsparkapplications/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the ScheduledSparkApplication object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
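+// In this implementation, reconciliation is a small state machine over
+// Status.ScheduleState:
+//
+//	New              -> record the first run time and move to Scheduled
+//	Scheduled        -> when NextRun is due, start a run subject to the
+//	                    ConcurrencyPolicy, then requeue for the next cron time
+//	FailedValidation -> terminal; entered when the cron expression is invalid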
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.2/pkg/reconcile
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	key := req.NamespacedName
+	oldScheduledApp, err := r.getScheduledSparkApplication(ctx, key)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{Requeue: true}, err
+	}
+	scheduledApp := oldScheduledApp.DeepCopy()
+	logger.Info("Reconciling ScheduledSparkApplication", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace, "state", scheduledApp.Status.ScheduleState)
+
+	if scheduledApp.Spec.Suspend != nil && *scheduledApp.Spec.Suspend {
+		return ctrl.Result{}, nil
+	}
+
+	schedule, parseErr := cron.ParseStandard(scheduledApp.Spec.Schedule)
+	if parseErr != nil {
+		logger.Error(parseErr, "Failed to parse schedule of ScheduledSparkApplication", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace, "schedule", scheduledApp.Spec.Schedule)
+		scheduledApp.Status.ScheduleState = v1beta2.ScheduleStateFailedValidation
+		scheduledApp.Status.Reason = parseErr.Error()
+		if updateErr := r.updateScheduledSparkApplicationStatus(ctx, scheduledApp); updateErr != nil {
+			return ctrl.Result{Requeue: true}, updateErr
+		}
+		return ctrl.Result{}, nil
+	}
+
+	switch scheduledApp.Status.ScheduleState {
+	case v1beta2.ScheduleStateNew:
+		now := r.clock.Now()
+		oldNextRunTime := scheduledApp.Status.NextRun.Time
+		nextRunTime := schedule.Next(now)
+		if oldNextRunTime.IsZero() || nextRunTime.Before(oldNextRunTime) {
+			scheduledApp.Status.NextRun = metav1.NewTime(nextRunTime)
+		}
+		scheduledApp.Status.ScheduleState = v1beta2.ScheduleStateScheduled
+		if err := r.updateScheduledSparkApplicationStatus(ctx, scheduledApp); err != nil {
+			return ctrl.Result{Requeue: true}, err
+		}
+		return ctrl.Result{RequeueAfter: nextRunTime.Sub(now)}, nil
+	case v1beta2.ScheduleStateScheduled:
+		now := r.clock.Now()
+		nextRunTime := scheduledApp.Status.NextRun
+		if nextRunTime.IsZero() {
+			scheduledApp.Status.NextRun = metav1.NewTime(schedule.Next(now))
+			if err := r.updateScheduledSparkApplicationStatus(ctx, scheduledApp); err != nil {
+				return ctrl.Result{Requeue: true}, err
+			}
+			return ctrl.Result{RequeueAfter: schedule.Next(now).Sub(now)}, nil
+		}
+
+		if nextRunTime.Time.After(now) {
+			return ctrl.Result{RequeueAfter: nextRunTime.Time.Sub(now)}, nil
+		}
+
+		ok, err := r.shouldStartNextRun(scheduledApp)
+		if err != nil {
+			return ctrl.Result{Requeue: true}, err
+		}
+		if !ok {
+			return ctrl.Result{RequeueAfter: schedule.Next(now).Sub(now)}, nil
+		}
+
+		logger.Info("Next run of ScheduledSparkApplication is due", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace)
+		app, err := r.startNextRun(scheduledApp, now)
+		if err != nil {
+			logger.Error(err, "Failed to start next run for ScheduledSparkApplication", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace)
+			return ctrl.Result{RequeueAfter: schedule.Next(now).Sub(now)}, err
+		}
+
+		scheduledApp.Status.LastRun = metav1.NewTime(now)
+		scheduledApp.Status.LastRunName = app.Name
+		scheduledApp.Status.NextRun = metav1.NewTime(schedule.Next(now))
+		if err = r.checkAndUpdatePastRuns(ctx, scheduledApp); err != nil {
+			return ctrl.Result{Requeue: true}, err
+		}
+		if err := r.updateScheduledSparkApplicationStatus(ctx, scheduledApp); err != nil {
+			return ctrl.Result{Requeue: true}, err
+		}
+		return ctrl.Result{RequeueAfter: schedule.Next(now).Sub(now)}, nil
+	case
v1beta2.ScheduleStateFailedValidation: + return ctrl.Result{}, nil + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + Named("scheduled-spark-application-controller"). + Watches( + &v1beta2.ScheduledSparkApplication{}, + NewEventHandler(), + builder.WithPredicates( + NewEventFilter(r.options.Namespaces), + )). + WithOptions(options). + Complete(r) +} + +func (r *Reconciler) getScheduledSparkApplication(ctx context.Context, key types.NamespacedName) (*v1beta2.ScheduledSparkApplication, error) { + app := &v1beta2.ScheduledSparkApplication{} + if err := r.client.Get(ctx, key, app); err != nil { + return nil, err + } + return app, nil +} + +func (r *Reconciler) createSparkApplication( + scheduledApp *v1beta2.ScheduledSparkApplication, + t time.Time, +) (*v1beta2.SparkApplication, error) { + labels := map[string]string{ + common.LabelScheduledSparkAppName: scheduledApp.Name, + } + for key, value := range scheduledApp.Labels { + labels[key] = value + } + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", scheduledApp.Name, t.UnixNano()), + Namespace: scheduledApp.Namespace, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1beta2.SchemeGroupVersion.String(), + Kind: reflect.TypeOf(v1beta2.ScheduledSparkApplication{}).Name(), + Name: scheduledApp.Name, + UID: scheduledApp.UID, + BlockOwnerDeletion: util.BoolPtr(true), + }}, + }, + Spec: scheduledApp.Spec.Template, + } + if err := r.client.Create(context.TODO(), app); err != nil { + return nil, err + } + return app, nil +} + +// shouldStartNextRun checks if the next run should be started. +func (r *Reconciler) shouldStartNextRun(scheduledApp *v1beta2.ScheduledSparkApplication) (bool, error) { + apps, err := r.listSparkApplications(scheduledApp) + if err != nil { + return false, err + } + if len(apps) == 0 { + return true, nil + } + + sortSparkApplicationsInPlace(apps) + // The last run (most recently started) is the first one in the sorted slice. + lastRun := apps[0] + switch scheduledApp.Spec.ConcurrencyPolicy { + case v1beta2.ConcurrencyAllow: + return true, nil + case v1beta2.ConcurrencyForbid: + return r.hasLastRunFinished(lastRun), nil + case v1beta2.ConcurrencyReplace: + if err := r.killLastRunIfNotFinished(lastRun); err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +func (r *Reconciler) startNextRun(scheduledApp *v1beta2.ScheduledSparkApplication, now time.Time) (*v1beta2.SparkApplication, error) { + app, err := r.createSparkApplication(scheduledApp, now) + if err != nil { + return nil, err + } + return app, nil +} + +func (r *Reconciler) hasLastRunFinished(app *v1beta2.SparkApplication) bool { + return app.Status.AppState.State == v1beta2.ApplicationStateCompleted || + app.Status.AppState.State == v1beta2.ApplicationStateFailed +} + +func (r *Reconciler) killLastRunIfNotFinished(app *v1beta2.SparkApplication) error { + finished := r.hasLastRunFinished(app) + if finished { + return nil + } + + // Delete the SparkApplication object of the last run. 
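+	// GracePeriodSeconds(0) asks the API server to delete the object
+	// immediately, so that under ConcurrencyReplace the replacement run
+	// does not overlap with the run being killed.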
+	if err := r.client.Delete(context.TODO(), app, client.GracePeriodSeconds(0)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Reconciler) checkAndUpdatePastRuns(ctx context.Context, scheduledApp *v1beta2.ScheduledSparkApplication) error {
+	apps, err := r.listSparkApplications(scheduledApp)
+	if err != nil {
+		return err
+	}
+
+	var completedApps []*v1beta2.SparkApplication
+	var failedApps []*v1beta2.SparkApplication
+	for _, app := range apps {
+		if app.Status.AppState.State == v1beta2.ApplicationStateCompleted {
+			completedApps = append(completedApps, app)
+		} else if app.Status.AppState.State == v1beta2.ApplicationStateFailed {
+			failedApps = append(failedApps, app)
+		}
+	}
+
+	historyLimit := 1
+	if scheduledApp.Spec.SuccessfulRunHistoryLimit != nil {
+		historyLimit = int(*scheduledApp.Spec.SuccessfulRunHistoryLimit)
+	}
+
+	toKeep, toDelete := bookkeepPastRuns(completedApps, historyLimit)
+	scheduledApp.Status.PastSuccessfulRunNames = []string{}
+	for _, app := range toKeep {
+		scheduledApp.Status.PastSuccessfulRunNames = append(scheduledApp.Status.PastSuccessfulRunNames, app.Name)
+	}
+	for _, app := range toDelete {
+		if err := r.client.Delete(ctx, app, client.GracePeriodSeconds(0)); err != nil {
+			return err
+		}
+	}
+
+	historyLimit = 1
+	if scheduledApp.Spec.FailedRunHistoryLimit != nil {
+		historyLimit = int(*scheduledApp.Spec.FailedRunHistoryLimit)
+	}
+	toKeep, toDelete = bookkeepPastRuns(failedApps, historyLimit)
+	scheduledApp.Status.PastFailedRunNames = []string{}
+	for _, app := range toKeep {
+		scheduledApp.Status.PastFailedRunNames = append(scheduledApp.Status.PastFailedRunNames, app.Name)
+	}
+	for _, app := range toDelete {
+		if err := r.client.Delete(ctx, app, client.GracePeriodSeconds(0)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *Reconciler) updateScheduledSparkApplicationStatus(ctx context.Context, scheduledApp *v1beta2.ScheduledSparkApplication) error {
+	// logger.Info("Updating SchedulingSparkApplication", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace, "status", scheduledApp.Status)
+	if err := r.client.Status().Update(ctx, scheduledApp); err != nil {
+		return fmt.Errorf("failed to update ScheduledSparkApplication status: %v", err)
+	}
+
+	return nil
+}
+
+// listSparkApplications lists the SparkApplications that are owned by the given
+// ScheduledSparkApplication; callers are responsible for sorting the result.
+func (r *Reconciler) listSparkApplications(app *v1beta2.ScheduledSparkApplication) ([]*v1beta2.SparkApplication, error) {
+	set := labels.Set{common.LabelScheduledSparkAppName: app.Name}
+	appList := &v1beta2.SparkApplicationList{}
+	if err := r.client.List(context.TODO(), appList, client.InNamespace(app.Namespace), client.MatchingLabels(set)); err != nil {
+		return nil, fmt.Errorf("failed to list SparkApplications: %v", err)
+	}
+	apps := []*v1beta2.SparkApplication{}
+	for i := range appList.Items {
+		// Take the address of the slice element rather than of the loop
+		// variable, which would alias a single variable on older Go versions.
+		apps = append(apps, &appList.Items[i])
+	}
+	return apps, nil
+}
+
+// sortSparkApplicationsInPlace sorts the given slice of SparkApplication in place by the decreasing order of creation timestamp.
+func sortSparkApplicationsInPlace(apps []*v1beta2.SparkApplication) {
+	sort.Slice(apps, func(i, j int) bool {
+		return apps[i].CreationTimestamp.After(apps[j].CreationTimestamp.Time)
+	})
+}
+
+// bookkeepPastRuns bookkeeps the past runs of the given SparkApplication slice.
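+// It sorts the runs newest-first and splits them at the history limit: the
+// first return value holds the runs to keep (at most limit of them), the
+// second holds the older runs to delete. With limit = 1, for example, only
+// the most recent run survives.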
+func bookkeepPastRuns(apps []*v1beta2.SparkApplication, limit int) ([]*v1beta2.SparkApplication, []*v1beta2.SparkApplication) { + if len(apps) <= limit { + return apps, nil + } + sortSparkApplicationsInPlace(apps) + toKeep := apps[:limit] + toDelete := apps[limit:] + return toKeep, toDelete +} diff --git a/internal/controller/scheduledsparkapplication/controller_test.go b/internal/controller/scheduledsparkapplication/controller_test.go new file mode 100644 index 000000000..fd95d302d --- /dev/null +++ b/internal/controller/scheduledsparkapplication/controller_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduledsparkapplication + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/clock" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/api/v1beta2" +) + +var _ = Describe("ScheduledSparkApplication Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + scheduledsparkapplication := &v1beta2.ScheduledSparkApplication{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ScheduledSparkApplication") + err := k8sClient.Get(ctx, typeNamespacedName, scheduledsparkapplication) + if err != nil && errors.IsNotFound(err) { + resource := &v1beta2.ScheduledSparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: v1beta2.ScheduledSparkApplicationSpec{ + Schedule: "@every 1m", + ConcurrencyPolicy: v1beta2.ConcurrencyAllow, + Template: v1beta2.SparkApplicationSpec{ + Type: v1beta2.SparkApplicationTypeScala, + Mode: v1beta2.DeployModeCluster, + RestartPolicy: v1beta2.RestartPolicy{ + Type: v1beta2.RestartPolicyNever, + }, + }, + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. 
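+			// The Get below asserts that the resource still exists before it is
+			// deleted, so a spec that removes the resource itself would need to
+			// relax this cleanup.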
+			resource := &v1beta2.ScheduledSparkApplication{}
+			err := k8sClient.Get(ctx, typeNamespacedName, resource)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Cleanup the specific resource instance ScheduledSparkApplication")
+			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
+		})
+
+		It("should successfully reconcile the resource", func() {
+			By("Reconciling the created resource")
+			reconciler := NewReconciler(k8sClient.Scheme(), k8sClient, nil, clock.RealClock{}, Options{Namespaces: []string{"default"}})
+			_, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typeNamespacedName})
+			Expect(err).NotTo(HaveOccurred())
+			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
+			// Example: If you expect a certain status condition after reconciliation, verify it here.
+		})
+	})
+})
diff --git a/internal/controller/scheduledsparkapplication/event_filter.go b/internal/controller/scheduledsparkapplication/event_filter.go
new file mode 100644
index 000000000..e6ea5487b
--- /dev/null
+++ b/internal/controller/scheduledsparkapplication/event_filter.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheduledsparkapplication
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+)
+
+// EventFilter filters ScheduledSparkApplication events by namespace.
+type EventFilter struct {
+	namespaces map[string]bool
+}
+
+// EventFilter implements predicate.Predicate.
+var _ predicate.Predicate = &EventFilter{}
+
+// NewEventFilter creates a new EventFilter instance.
+func NewEventFilter(namespaces []string) *EventFilter {
+	nsMap := make(map[string]bool)
+	for _, ns := range namespaces {
+		nsMap[ns] = true
+	}
+	return &EventFilter{
+		namespaces: nsMap,
+	}
+}
+
+// Create implements predicate.Predicate.
+func (f *EventFilter) Create(e event.CreateEvent) bool {
+	app, ok := e.Object.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return false
+	}
+	return f.filter(app)
+}
+
+// Update implements predicate.Predicate.
+func (f *EventFilter) Update(e event.UpdateEvent) bool {
+	newApp, ok := e.ObjectNew.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return false
+	}
+
+	return f.filter(newApp)
+}
+
+// Delete implements predicate.Predicate.
+func (f *EventFilter) Delete(_ event.DeleteEvent) bool {
+	return false
+}
+
+// Generic implements predicate.Predicate.
+func (f *EventFilter) Generic(e event.GenericEvent) bool { + app, ok := e.Object.(*v1beta2.ScheduledSparkApplication) + if !ok { + return false + } + return f.filter(app) +} + +func (f *EventFilter) filter(app *v1beta2.ScheduledSparkApplication) bool { + return f.namespaces[metav1.NamespaceAll] || f.namespaces[app.Namespace] +} diff --git a/internal/controller/scheduledsparkapplication/event_handler.go b/internal/controller/scheduledsparkapplication/event_handler.go new file mode 100644 index 000000000..92127ac01 --- /dev/null +++ b/internal/controller/scheduledsparkapplication/event_handler.go @@ -0,0 +1,85 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduledsparkapplication + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + "github.com/kubeflow/spark-operator/api/v1beta2" +) + +// EventHandler handles events for ScheduledSparkApplication. +type EventHandler struct { +} + +// EventHandler implements handler.EventHandler. +var _ handler.EventHandler = &EventHandler{} + +// NewEventHandler creates a new EventHandler instance +func NewEventHandler() *EventHandler { + return &EventHandler{} +} + +// Create implements handler.EventHandler. +func (s *EventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.ScheduledSparkApplication) + if !ok { + return + } + + logger.V(1).Info("ScheduledSparkApplication created", "name", app.Name, "namespace", app.Namespace) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) +} + +// Update implements handler.EventHandler. +func (s *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { + oldApp, ok := event.ObjectOld.(*v1beta2.ScheduledSparkApplication) + if !ok { + return + } + + logger.V(1).Info("ScheduledSparkApplication updated", "name", oldApp.Name, "namespace", oldApp.Namespace) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: oldApp.Name, Namespace: oldApp.Namespace}}) +} + +// Delete implements handler.EventHandler. +func (s *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.ScheduledSparkApplication) + if !ok { + return + } + + logger.V(1).Info("ScheduledSparkApplication deleted", "name", app.Name, "namespace", app.Namespace) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) +} + +// Generic implements handler.EventHandler. 
+func (s *EventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) {
+	app, ok := event.Object.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return
+	}
+
+	logger.V(1).Info("ScheduledSparkApplication generic event", "name", app.Name, "namespace", app.Namespace)
+	queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}})
+}
diff --git a/internal/controller/scheduledsparkapplication/suite_test.go b/internal/controller/scheduledsparkapplication/suite_test.go
new file mode 100644
index 000000000..2a98ffa90
--- /dev/null
+++ b/internal/controller/scheduledsparkapplication/suite_test.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheduledsparkapplication
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/kubeflow/spark-operator/api/v1beta1"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+
+func TestControllers(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Controller Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+
+		// BinaryAssetsDirectory is only required if you want to run the tests directly
+		// without calling the makefile target "test". If unset, envtest looks for the
+		// default path defined in controller-runtime, which is /usr/local/kubebuilder/.
+		// Note that the required binaries must be set up under the bin directory to run
+		// the tests directly. "make test" sets them up and uses them automatically.
+		BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s",
+			fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)),
+	}
+
+	var err error
+	// cfg is defined in this file globally.
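+	// testEnv.Start() brings up a local control plane (kube-apiserver and etcd)
+	// from the envtest binary assets and returns a rest.Config pointing at it.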
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = v1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/controller/sparkapplication/controller.go b/internal/controller/sparkapplication/controller.go new file mode 100644 index 000000000..753108a90 --- /dev/null +++ b/internal/controller/sparkapplication/controller.go @@ -0,0 +1,1217 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + "fmt" + "time" + + "github.com/golang/glog" + "github.com/google/uuid" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/metrics" + "github.com/kubeflow/spark-operator/internal/scheduler" + "github.com/kubeflow/spark-operator/internal/scheduler/volcano" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var ( + logger = log.Log.WithName("") +) + +// Options defines the options of the controller. +type Options struct { + Namespaces []string + EnableUIService bool + IngressClassName string + IngressURLFormat string + + SparkApplicationMetrics *metrics.SparkApplicationMetrics + SparkExecutorMetrics *metrics.SparkExecutorMetrics +} + +// Reconciler reconciles a SparkApplication object. +type Reconciler struct { + manager ctrl.Manager + scheme *runtime.Scheme + client client.Client + recorder record.EventRecorder + options Options + registry *scheduler.Registry +} + +// Reconciler implements reconcile.Reconciler. +var _ reconcile.Reconciler = &Reconciler{} + +// NewReconciler creates a new Reconciler instance. 
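+// All collaborators are injected rather than constructed internally, so tests
+// can build a Reconciler against a bare client (controller_test.go passes nil
+// for the manager, recorder and registry).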
+func NewReconciler( + manager ctrl.Manager, + scheme *runtime.Scheme, + client client.Client, + recorder record.EventRecorder, + registry *scheduler.Registry, + options Options, +) *Reconciler { + return &Reconciler{ + manager: manager, + scheme: scheme, + client: client, + recorder: recorder, + registry: registry, + options: options, + } +} + +// +kubebuilder:rbac:groups=,resources=pods,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups=,resources=configmaps,verbs=get;list;create;update;patch;delete +// +kubebuilder:rbac:groups=,resources=services,verbs=get;create;delete +// +kubebuilder:rbac:groups=,resources=nodes,verbs=get +// +kubebuilder:rbac:groups=,resources=events,verbs=create;update;patch +// +kubebuilder:rbac:groups=,resources=resourcequotas,verbs=get;list;watch +// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the SparkApplication object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.2/pkg/reconcile + +// Reconcile handles Create, Update and Delete events of the custom resource. 
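+//
+// Each state in the diagram below is handled by a dedicated
+// reconcile<State>SparkApplication method; Reconcile itself only dispatches
+// on Status.AppState.State.
+//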
+// State Machine for SparkApplication: +// +--------------------------------------------------------------------------------------------------------------------+ +// | +---------------------------------------------------------------------------------------------+ | +// | | +----------+ | | +// | | | | | | +// | | | | | | +// | | |Submission| | | +// | | +----> Failed +----+------------------------------------------------------------------+ | | +// | | | | | | | | | +// | | | | | | | | | +// | | | +----^-----+ | +-----------------------------------------+ | | | +// | | | | | | | | | | +// | | | | | | | | | | +// | +-+--+----+ | +-----v--+-+ +----------+ +-----v-----+ +----v--v--+ | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | | New +---------> Submitted+----------> Running +-----------> Failing +----------> Failed | | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | +---------+ | +----^-----+ +-----+----+ +-----+-----+ +----------+ | +// | | | | | | +// | | | | | | +// | +------------+ | | +-------------------------------+ | +// | | | | +-----+-----+ | | +-----------+ +----------+ | +// | | | | | Pending | | | | | | | | +// | | | +---+ Rerun <-------+ +---------------->Succeeding +---------->Completed | | +// | |Invalidating| | <-------+ | | | | | +// | | +-------> | | | | | | | +// | | | | | | | | | | | +// | | | +-----------+ | +-----+-----+ +----------+ | +// | +------------+ | | | +// | | | | +// | +-------------------------------+ | +// | | +// +--------------------------------------------------------------------------------------------------------------------+ +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + app, err := r.getSparkApplication(key) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{Requeue: true}, err + } + logger.Info("Reconciling SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + + // Check if the spark application is being deleted + if !app.DeletionTimestamp.IsZero() { + return r.handleSparkApplicationDeletion(ctx, req) + } + switch app.Status.AppState.State { + case v1beta2.ApplicationStateNew: + return r.reconcileNewSparkApplication(ctx, req) + case v1beta2.ApplicationStateSubmitted: + return r.reconcileSubmittedSparkApplication(ctx, req) + case v1beta2.ApplicationStateFailedSubmission: + return r.reconcileFailedSubmissionSparkApplication(ctx, req) + case v1beta2.ApplicationStateRunning: + return r.reconcileRunningSparkApplication(ctx, req) + case v1beta2.ApplicationStatePendingRerun: + return r.reconcilePendingRerunSparkApplication(ctx, req) + case v1beta2.ApplicationStateInvalidating: + return r.reconcileInvalidatingSparkApplication(ctx, req) + case v1beta2.ApplicationStateSucceeding: + return r.reconcileSucceedingSparkApplication(ctx, req) + case v1beta2.ApplicationStateFailing: + return r.reconcileFailingSparkApplication(ctx, req) + case v1beta2.ApplicationStateCompleted: + return r.reconcileCompletedSparkApplication(ctx, req) + case v1beta2.ApplicationStateFailed: + return r.reconcileFailedSparkApplication(ctx, req) + case v1beta2.ApplicationStateUnknown: + return r.reconcileUnknownSparkApplication(ctx, req) + } + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. 
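+//
+// A minimal wiring sketch (the "registry", "opts" and option values below are
+// illustrative, not taken from the actual start-up code):
+//
+//	reconciler := NewReconciler(mgr, mgr.GetScheme(), mgr.GetClient(),
+//		mgr.GetEventRecorderFor("spark-application-controller"), registry, opts)
+//	if err := reconciler.SetupWithManager(mgr, controller.Options{
+//		MaxConcurrentReconciles: 10, // assumed value
+//	}); err != nil {
+//		return err
+//	}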
+func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + Named("spark-application-controller"). + Watches( + &corev1.Pod{}, + NewSparkPodEventHandler(mgr.GetClient(), r.options.SparkExecutorMetrics), + builder.WithPredicates(newSparkPodEventFilter(r.options.Namespaces)), + ). + Watches( + &v1beta2.SparkApplication{}, + NewSparkApplicationEventHandler(r.options.SparkApplicationMetrics), + builder.WithPredicates( + NewSparkApplicationEventFilter( + mgr.GetClient(), + mgr.GetEventRecorderFor("spark-application-event-handler"), + r.options.Namespaces, + ), + ), + ). + WithOptions(options). + Complete(r) +} + +func (r *Reconciler) handleSparkApplicationDeletion(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + app, err := r.getSparkApplication(key) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{Requeue: true}, err + } + + if err := r.deleteSparkResources(ctx, app); err != nil { + logger.Error(err, "Failed to delete resources associated with SparkApplication", "name", app.Name, "namespace", app.Namespace) + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileNewSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateNew { + return nil + } + app := old.DeepCopy() + + if err := r.submitSparkApplication(app); err != nil { + logger.Error(err, "Failed to submit SparkApplication", "name", app.Name, "namespace", app.Namespace) + app.Status = v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.ApplicationStateFailedSubmission, + ErrorMessage: err.Error(), + }, + SubmissionAttempts: app.Status.SubmissionAttempts + 1, + LastSubmissionAttemptTime: metav1.Now(), + } + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{Requeue: true}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileSubmittedSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateSubmitted { + return nil + } + app := old.DeepCopy() + + if err := r.updateSparkApplicationState(ctx, app); err != nil { + return err + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileFailedSubmissionSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if 
old.Status.AppState.State != v1beta2.ApplicationStateFailedSubmission {
+				return nil
+			}
+			app := old.DeepCopy()
+
+			if util.ShouldRetry(app) {
+				if isNextRetryDue(app) {
+					if r.validateSparkResourceDeletion(ctx, app) {
+						_ = r.submitSparkApplication(app)
+					} else {
+						if err := r.deleteSparkResources(ctx, app); err != nil {
+							logger.Error(err, "failed to delete resources associated with SparkApplication", "name", app.Name, "namespace", app.Namespace)
+							return err
+						}
+						// Resources are still being cleaned up; resubmission happens on a later reconciliation.
+						return nil
+					}
+				}
+			} else {
+				app.Status.AppState.State = v1beta2.ApplicationStateFailed
+				app.Status.TerminationTime = metav1.Now()
+				r.recordSparkApplicationEvent(app)
+			}
+
+			if err := r.updateSparkApplicationStatus(ctx, app); err != nil {
+				return err
+			}
+			return nil
+		},
+	)
+	if retryErr != nil {
+		logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace)
+		return ctrl.Result{}, retryErr
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) reconcileRunningSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	key := req.NamespacedName
+	retryErr := retry.RetryOnConflict(
+		retry.DefaultRetry,
+		func() error {
+			old, err := r.getSparkApplication(key)
+			if err != nil {
+				return err
+			}
+			if old.Status.AppState.State != v1beta2.ApplicationStateRunning {
+				return nil
+			}
+			app := old.DeepCopy()
+
+			if err := r.updateSparkApplicationState(ctx, app); err != nil {
+				return err
+			}
+			if err := r.updateSparkApplicationStatus(ctx, app); err != nil {
+				return err
+			}
+			return nil
+		},
+	)
+	if retryErr != nil {
+		logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace)
+		return ctrl.Result{}, retryErr
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) reconcilePendingRerunSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	key := req.NamespacedName
+	retryErr := retry.RetryOnConflict(
+		retry.DefaultRetry,
+		func() error {
+			old, err := r.getSparkApplication(key)
+			if err != nil {
+				return err
+			}
+			if old.Status.AppState.State != v1beta2.ApplicationStatePendingRerun {
+				return nil
+			}
+			app := old.DeepCopy()
+
+			logger.Info("Pending rerun SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+			if r.validateSparkResourceDeletion(ctx, app) {
+				logger.Info("Successfully deleted resources associated with SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+				r.recordSparkApplicationEvent(app)
+				r.resetSparkApplicationStatus(app)
+				if err = r.submitSparkApplication(app); err != nil {
+					logger.Error(err, "Failed to run spark-submit", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+				}
+			}
+			if err := r.updateSparkApplicationStatus(ctx, app); err != nil {
+				return err
+			}
+			return nil
+		},
+	)
+	if retryErr != nil {
+		logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace)
+		return ctrl.Result{}, retryErr
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) reconcileInvalidatingSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	key := req.NamespacedName
+	retryErr := retry.RetryOnConflict(
+		retry.DefaultRetry,
+		func() error {
+			old, err := r.getSparkApplication(key)
+			if err != nil {
+				return err
+			}
+			if old.Status.AppState.State != v1beta2.ApplicationStateInvalidating {
+				return nil
+			}
+			app := old.DeepCopy()
+
+			// Invalidate the current run and enqueue the
SparkApplication for re-execution. + if err := r.deleteSparkResources(ctx, app); err != nil { + logger.Error(err, "Failed to delete resources associated with SparkApplication", "name", app.Name, "namespace", app.Namespace) + } else { + r.resetSparkApplicationStatus(app) + app.Status.AppState.State = v1beta2.ApplicationStatePendingRerun + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileSucceedingSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateSucceeding { + return nil + } + app := old.DeepCopy() + + if util.ShouldRetry(app) { + if err := r.deleteSparkResources(ctx, app); err != nil { + logger.Error(err, "failed to delete spark resources", "name", app.Name, "namespace", app.Namespace) + return err + } + app.Status.AppState.State = v1beta2.ApplicationStatePendingRerun + } else { + app.Status.AppState.State = v1beta2.ApplicationStateCompleted + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileFailingSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateFailing { + return nil + } + app := old.DeepCopy() + + if util.ShouldRetry(app) { + if isNextRetryDue(app) { + if err := r.deleteSparkResources(ctx, app); err != nil { + logger.Error(err, "failed to delete spark resources", "name", app.Name, "namespace", app.Namespace) + return err + } + app.Status.AppState.State = v1beta2.ApplicationStatePendingRerun + } + } else { + app.Status.AppState.State = v1beta2.ApplicationStateFailed + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileCompletedSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateCompleted { + return nil + } + app := old.DeepCopy() + + if util.IsExpired(app) { + logger.Info("Deleting expired SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + if err := r.client.Delete(ctx, app); err != nil { + return err + } + return nil + } + if err := r.updateExecutorState(ctx, app); err != nil { + return 
err + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + if err := r.cleanUpOnTermination(old, app); err != nil { + logger.Error(err, "Failed to clean up resources for SparkApplication", "name", old.Name, "namespace", old.Namespace, "state", old.Status.AppState.State) + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileFailedSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateFailed { + return nil + } + app := old.DeepCopy() + + if util.IsExpired(app) { + logger.Info("Deleting expired SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + if err := r.client.Delete(ctx, app); err != nil { + return err + } + return nil + } + if err := r.updateExecutorState(ctx, app); err != nil { + return err + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + if err := r.cleanUpOnTermination(old, app); err != nil { + logger.Error(err, "Failed to clean up resources for SparkApplication", "name", old.Name, "namespace", old.Namespace, "state", old.Status.AppState.State) + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileUnknownSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateUnknown { + return nil + } + app := old.DeepCopy() + + if err := r.updateSparkApplicationState(ctx, app); err != nil { + return err + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +// getSparkApplication gets the SparkApplication with the given name and namespace. +func (r *Reconciler) getSparkApplication(key types.NamespacedName) (*v1beta2.SparkApplication, error) { + app := &v1beta2.SparkApplication{} + if err := r.client.Get(context.TODO(), key, app); err != nil { + return nil, err + } + return app, nil +} + +// submitSparkApplication creates a new submission for the given SparkApplication and submits it using spark-submit. 
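+// The submission pipeline runs, in order: optional Prometheus monitoring
+// configuration, optional batch scheduling, optional web UI service/ingress
+// creation, per-configuration driver ingress creation, building the
+// spark-submit arguments, and finally running spark-submit and recording the
+// resulting state transition.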
+func (r *Reconciler) submitSparkApplication(app *v1beta2.SparkApplication) error {
+	logger.Info("Submitting SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+
+	if util.PrometheusMonitoringEnabled(app) {
+		logger.Info("Configuring Prometheus monitoring for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+		if err := configPrometheusMonitoring(app, r.client); err != nil {
+			return fmt.Errorf("failed to configure Prometheus monitoring: %v", err)
+		}
+	}
+
+	// Use the batch scheduler to perform scheduling before submission, i.e. before the spark-submit arguments are built.
+	if needScheduling, scheduler := r.shouldDoBatchScheduling(app); needScheduling {
+		logger.Info("Doing batch scheduling for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+		if err := scheduler.Schedule(app); err != nil {
+			return fmt.Errorf("failed to process batch scheduler: %v", err)
+		}
+	}
+
+	// Create a web UI service for the Spark application if enabled.
+	if r.options.EnableUIService {
+		service, err := r.createWebUIService(app)
+		if err != nil {
+			return fmt.Errorf("failed to create web UI service: %v", err)
+		}
+		app.Status.DriverInfo.WebUIServiceName = service.serviceName
+		app.Status.DriverInfo.WebUIPort = service.servicePort
+		app.Status.DriverInfo.WebUIAddress = fmt.Sprintf("%s:%d", service.serviceIP, app.Status.DriverInfo.WebUIPort)
+		logger.Info("Created web UI service for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+
+		// Create a UI ingress if an ingress URL format is set.
+		if r.options.IngressURLFormat != "" {
+			ingressURL, err := getDriverIngressURL(r.options.IngressURLFormat, app.Name, app.Namespace)
+			if err != nil {
+				return fmt.Errorf("failed to get ingress url: %v", err)
+			}
+			// Ensure the spark.ui variables are configured correctly if a subPath is used.
+			if ingressURL.Path != "" {
+				if app.Spec.SparkConf == nil {
+					app.Spec.SparkConf = make(map[string]string)
+				}
+				app.Spec.SparkConf[common.SparkUIProxyBase] = ingressURL.Path
+				app.Spec.SparkConf[common.SparkUIProxyRedirectURI] = "/"
+			}
+			ingress, err := r.createWebUIIngress(app, *service, ingressURL, r.options.IngressClassName)
+			if err != nil {
+				return fmt.Errorf("failed to create web UI ingress: %v", err)
+			}
+			app.Status.DriverInfo.WebUIIngressAddress = ingress.ingressURL.String()
+			app.Status.DriverInfo.WebUIIngressName = ingress.ingressName
+			logger.Info("Created web UI ingress for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+		}
+	}
+
+	for _, driverIngressConfiguration := range app.Spec.DriverIngressOptions {
+		logger.Info("Creating driver ingress service for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+		service, err := r.createDriverIngressServiceFromConfiguration(app, &driverIngressConfiguration)
+		if err != nil {
+			return fmt.Errorf("failed to create driver ingress service for SparkApplication: %v", err)
+		}
+		// Create an ingress if an ingress URL format is set on the configuration.
+		if driverIngressConfiguration.IngressURLFormat != "" {
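+			// For example (illustrative format, not a default), an IngressURLFormat of
+			// "{{$appName}}.{{$appNamespace}}.example.com" resolves to
+			// "myapp.default.example.com" for an application "myapp" in "default".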
+			ingressURL, err := getDriverIngressURL(driverIngressConfiguration.IngressURLFormat, app.Name, app.Namespace)
+			if err != nil {
+				return fmt.Errorf("failed to get driver ingress url: %v", err)
+			}
+			ingress, err := r.createDriverIngress(app, &driverIngressConfiguration, *service, ingressURL, r.options.IngressClassName)
+			if err != nil {
+				return fmt.Errorf("failed to create driver ingress: %v", err)
+			}
+			logger.V(1).Info("Created driver ingress for SparkApplication", "name", app.Name, "namespace", app.Namespace, "ingressName", ingress.ingressName, "ingressURL", ingress.ingressURL)
+		}
+	}
+
+	driverPodName := util.GetDriverPodName(app)
+	app.Status.DriverInfo.PodName = driverPodName
+	app.Status.SubmissionID = uuid.New().String()
+	sparkSubmitArgs, err := buildSparkSubmitArgs(app)
+	if err != nil {
+		return fmt.Errorf("failed to build spark-submit arguments: %v", err)
+	}
+
+	// Try submitting the application by running spark-submit.
+	logger.Info("Running spark-submit for SparkApplication", "name", app.Name, "namespace", app.Namespace, "arguments", sparkSubmitArgs)
+	submitted, err := runSparkSubmit(newSubmission(sparkSubmitArgs, app))
+	if err != nil {
+		r.recordSparkApplicationEvent(app)
+		return fmt.Errorf("failed to run spark-submit: %v", err)
+	}
+	if !submitted {
+		// The application may not have been submitted even if err == nil, e.g. when some
+		// state update caused an attempt to re-submit the application, in which case no
+		// error gets returned from runSparkSubmit. If this is the case, we simply return.
+		return nil
+	}
+
+	app.Status.AppState = v1beta2.ApplicationState{
+		State: v1beta2.ApplicationStateSubmitted,
+	}
+	app.Status.SubmissionAttempts++
+	app.Status.ExecutionAttempts++
+	app.Status.LastSubmissionAttemptTime = metav1.Now()
+	r.recordSparkApplicationEvent(app)
+	return nil
+}
+
+// isNextRetryDue determines whether the next retry of the SparkApplication is due now.
+func isNextRetryDue(app *v1beta2.SparkApplication) bool {
+	retryInterval := app.Spec.RestartPolicy.OnFailureRetryInterval
+	attemptsDone := app.Status.SubmissionAttempts
+	lastEventTime := app.Status.LastSubmissionAttemptTime
+	if retryInterval == nil || lastEventTime.IsZero() || attemptsDone <= 0 {
+		return false
+	}
+
+	// Retry once at least attemptsDone*retryInterval has elapsed since the last
+	// attempt, i.e. a linear back-off.
+	interval := time.Duration(*retryInterval) * time.Second * time.Duration(attemptsDone)
+	currentTime := time.Now()
+	logger.V(1).Info("Checking whether the next retry is due", "currentTime", currentTime, "interval", interval)
+	return currentTime.After(lastEventTime.Add(interval))
+}
+
+// updateDriverState finds the driver pod of the application
+// and updates the driver state based on the current phase of the pod.
+func (r *Reconciler) updateDriverState(_ context.Context, app *v1beta2.SparkApplication) error {
+	// Either the driver pod doesn't exist yet or its name has not been updated.
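+	// In that case the driver pod phase cannot be mapped to an application
+	// state via util.DriverStateToApplicationState below, so report an error.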
+ if app.Status.DriverInfo.PodName == "" { + return fmt.Errorf("empty driver pod name with application state %s", app.Status.AppState.State) + } + + driverPod, err := r.getDriverPod(app) + if err != nil { + return err + } + + if driverPod == nil { + app.Status.AppState.State = v1beta2.ApplicationStateFailing + app.Status.AppState.ErrorMessage = "driver pod not found" + app.Status.TerminationTime = metav1.Now() + return nil + } + + app.Status.SparkApplicationID = util.GetSparkApplicationID(driverPod) + driverState := util.GetDriverState(driverPod) + if util.IsDriverTerminated(driverState) { + if app.Status.TerminationTime.IsZero() { + app.Status.TerminationTime = metav1.Now() + } + if driverState == v1beta2.DriverStateFailed { + if state := util.GetDriverContainerTerminatedState(driverPod); state != nil { + if state.ExitCode != 0 { + app.Status.AppState.ErrorMessage = fmt.Sprintf("driver container failed with ExitCode: %d, Reason: %s", state.ExitCode, state.Reason) + } + } else { + app.Status.AppState.ErrorMessage = "driver container status missing" + } + } + } + + newState := util.DriverStateToApplicationState(driverState) + // Only record a driver event if the application state (derived from the driver pod phase) has changed. + if newState != app.Status.AppState.State { + r.recordDriverEvent(app, driverState, driverPod.Name) + app.Status.AppState.State = newState + } + + return nil +} + +// updateExecutorState lists the executor pods of the application +// and updates the executor state based on the current phase of the pods. +func (r *Reconciler) updateExecutorState(_ context.Context, app *v1beta2.SparkApplication) error { + podList, err := r.getExecutorPods(app) + if err != nil { + return err + } + pods := podList.Items + + executorStateMap := make(map[string]v1beta2.ExecutorState) + var executorApplicationID string + for _, pod := range pods { + if util.IsExecutorPod(&pod) { + newState := util.GetExecutorState(&pod) + oldState, exists := app.Status.ExecutorState[pod.Name] + // Only record an executor event if the executor state is new or it has changed. + if !exists || newState != oldState { + if newState == v1beta2.ExecutorStateFailed { + execContainerState := util.GetExecutorContainerTerminatedState(&pod) + if execContainerState != nil { + r.recordExecutorEvent(app, newState, pod.Name, execContainerState.ExitCode, execContainerState.Reason) + } else { + // If we can't find the container state, + // we need to set the exitCode and the Reason to unambiguous values. + r.recordExecutorEvent(app, newState, pod.Name, -1, "Unknown (Container not Found)") + } + } else { + r.recordExecutorEvent(app, newState, pod.Name) + } + } + executorStateMap[pod.Name] = newState + + if executorApplicationID == "" { + executorApplicationID = util.GetSparkApplicationID(&pod) + } + } + } + + // ApplicationID label can be different on driver/executors. Prefer executor ApplicationID if set. + // Refer https://issues.apache.org/jira/projects/SPARK/issues/SPARK-25922 for details. + if executorApplicationID != "" { + app.Status.SparkApplicationID = executorApplicationID + } + + if app.Status.ExecutorState == nil { + app.Status.ExecutorState = make(map[string]v1beta2.ExecutorState) + } + for name, state := range executorStateMap { + app.Status.ExecutorState[name] = state + } + + // Handle missing/deleted executors. 
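+	// A tracked executor whose pod has disappeared is resolved as follows:
+	// Completed if the application itself completed, Failed if the driver has
+	// otherwise terminated, and Unknown while the driver is still running.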
+	for name, oldStatus := range app.Status.ExecutorState {
+		_, exists := executorStateMap[name]
+		if !util.IsExecutorTerminated(oldStatus) && !exists {
+			if !util.IsDriverRunning(app) {
+				// If the application is COMPLETED, the driver pod terminated successfully
+				// and the executor pods have since been cleaned up, so a missing executor
+				// pod is assumed to have completed as well.
+				if app.Status.AppState.State == v1beta2.ApplicationStateCompleted {
+					app.Status.ExecutorState[name] = v1beta2.ExecutorStateCompleted
+				} else {
+					glog.Infof("Executor pod %s not found, assuming it was deleted.", name)
+					app.Status.ExecutorState[name] = v1beta2.ExecutorStateFailed
+				}
+			} else {
+				app.Status.ExecutorState[name] = v1beta2.ExecutorStateUnknown
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *Reconciler) getExecutorPods(app *v1beta2.SparkApplication) (*corev1.PodList, error) {
+	matchLabels := util.GetResourceLabels(app)
+	matchLabels[common.LabelSparkRole] = common.SparkRoleExecutor
+	pods := &corev1.PodList{}
+	if err := r.client.List(context.TODO(), pods, client.InNamespace(app.Namespace), client.MatchingLabels(matchLabels)); err != nil {
+		return nil, fmt.Errorf("failed to get pods for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
+	}
+	return pods, nil
+}
+
+func (r *Reconciler) getDriverPod(app *v1beta2.SparkApplication) (*corev1.Pod, error) {
+	pod := &corev1.Pod{}
+	var err error
+
+	key := types.NamespacedName{Namespace: app.Namespace, Name: app.Status.DriverInfo.PodName}
+	err = r.client.Get(context.TODO(), key, pod)
+	if err == nil {
+		return pod, nil
+	}
+	if !errors.IsNotFound(err) {
+		return nil, fmt.Errorf("failed to get driver pod %s: %v", app.Status.DriverInfo.PodName, err)
+	}
+
+	return nil, nil
+}
+
+func (r *Reconciler) updateSparkApplicationState(ctx context.Context, app *v1beta2.SparkApplication) error {
+	if err := r.updateDriverState(ctx, app); err != nil {
+		return err
+	}
+
+	if err := r.updateExecutorState(ctx, app); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// updateSparkApplicationStatus updates the status of the SparkApplication.
+func (r *Reconciler) updateSparkApplicationStatus(ctx context.Context, app *v1beta2.SparkApplication) error {
+	if err := r.client.Status().Update(ctx, app); err != nil {
+		return err
+	}
+	return nil
+}
+
+// deleteSparkResources deletes the resources associated with the SparkApplication.
+func (r *Reconciler) deleteSparkResources(ctx context.Context, app *v1beta2.SparkApplication) error {
+	if err := r.deleteDriverPod(ctx, app); err != nil {
+		return err
+	}
+
+	if err := r.deleteWebUIService(ctx, app); err != nil {
+		return err
+	}
+
+	if err := r.deleteWebUIIngress(ctx, app); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Reconciler) deleteDriverPod(ctx context.Context, app *v1beta2.SparkApplication) error {
+	podName := app.Status.DriverInfo.PodName
+	// Derive the driver pod name in case the driver pod name was not recorded in the status,
+	// which could happen if the status update right after submission failed.
+ if podName == "" { + podName = util.GetDriverPodName(app) + } + + logger.Info("Deleting driver pod", "name", podName, "namespace", app.Namespace) + if err := r.client.Delete( + ctx, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: app.Namespace, + }, + }, + ); err != nil && !errors.IsNotFound(err) { + return err + } + + return nil +} + +func (r *Reconciler) deleteWebUIService(ctx context.Context, app *v1beta2.SparkApplication) error { + svcName := app.Status.DriverInfo.WebUIServiceName + if svcName == "" { + return nil + } + logger.Info("Deleting Spark web UI service", "name", svcName, "namespace", app.Namespace) + if err := r.client.Delete( + ctx, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: app.Namespace, + }, + }, + &client.DeleteOptions{ + GracePeriodSeconds: util.Int64Ptr(0), + }, + ); err != nil && !errors.IsNotFound(err) { + return err + } + return nil +} + +func (r *Reconciler) deleteWebUIIngress(ctx context.Context, app *v1beta2.SparkApplication) error { + ingressName := app.Status.DriverInfo.WebUIIngressName + if ingressName == "" { + return nil + } + + if util.IngressCapabilities.Has("networking.k8s.io/v1") { + logger.Info("Deleting Spark web UI ingress", "name", ingressName, "namespace", app.Namespace) + if err := r.client.Delete( + ctx, + &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingressName, + Namespace: app.Namespace, + }, + }, + &client.DeleteOptions{ + GracePeriodSeconds: util.Int64Ptr(0), + }, + ); err != nil && !errors.IsNotFound(err) { + return err + } + } + + if util.IngressCapabilities.Has("extensions/v1beta1") { + logger.V(1).Info("Deleting extensions/v1beta1 Spark UI Ingress", "name", ingressName, "namespace", app.Namespace) + if err := r.client.Delete( + context.TODO(), + &extensionsv1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingressName, + Namespace: app.Namespace, + }, + }, + &client.DeleteOptions{ + GracePeriodSeconds: util.Int64Ptr(0), + }, + ); err != nil && !errors.IsNotFound(err) { + return err + } + } + + return nil +} + +// Validate that any Spark resources (driver/Service/Ingress) created for the application have been deleted. +func (r *Reconciler) validateSparkResourceDeletion(ctx context.Context, app *v1beta2.SparkApplication) bool { + // Validate whether driver pod has been deleted. + driverPodName := app.Status.DriverInfo.PodName + // Derive the driver pod name in case the driver pod name was not recorded in the status, + // which could happen if the status update right after submission failed. + if driverPodName == "" { + driverPodName = util.GetDriverPodName(app) + } + if err := r.client.Get(ctx, types.NamespacedName{Name: driverPodName, Namespace: app.Namespace}, &corev1.Pod{}); err == nil || !errors.IsNotFound(err) { + return false + } + + // Validate whether Spark web UI service has been deleted. + sparkUIServiceName := app.Status.DriverInfo.WebUIServiceName + if sparkUIServiceName != "" { + if err := r.client.Get(ctx, types.NamespacedName{Name: sparkUIServiceName, Namespace: app.Namespace}, &corev1.Service{}); err == nil || !errors.IsNotFound(err) { + return false + } + } + + // Validate whether Spark web UI ingress has been deleted. 
+	sparkUIIngressName := app.Status.DriverInfo.WebUIIngressName
+	if sparkUIIngressName != "" {
+		if err := r.client.Get(ctx, types.NamespacedName{Name: sparkUIIngressName, Namespace: app.Namespace}, &networkingv1.Ingress{}); err == nil || !errors.IsNotFound(err) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (r *Reconciler) recordSparkApplicationEvent(app *v1beta2.SparkApplication) {
+	switch app.Status.AppState.State {
+	case v1beta2.ApplicationStateNew:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeNormal,
+			common.EventSparkApplicationAdded,
+			"SparkApplication %s was added, enqueuing it for submission",
+			app.Name,
+		)
+	case v1beta2.ApplicationStateSubmitted:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeNormal,
+			common.EventSparkApplicationSubmitted,
+			"SparkApplication %s was submitted successfully",
+			app.Name,
+		)
+	case v1beta2.ApplicationStateFailedSubmission:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeWarning,
+			common.EventSparkApplicationSubmissionFailed,
+			"failed to submit SparkApplication %s: %s",
+			app.Name,
+			app.Status.AppState.ErrorMessage,
+		)
+	case v1beta2.ApplicationStateCompleted:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeNormal,
+			common.EventSparkApplicationCompleted,
+			"SparkApplication %s completed",
+			app.Name,
+		)
+	case v1beta2.ApplicationStateFailed:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeWarning,
+			common.EventSparkApplicationFailed,
+			"SparkApplication %s failed: %s",
+			app.Name,
+			app.Status.AppState.ErrorMessage,
+		)
+	case v1beta2.ApplicationStatePendingRerun:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeWarning,
+			common.EventSparkApplicationPendingRerun,
+			"SparkApplication %s is pending rerun",
+			app.Name,
+		)
+	}
+}
+
+func (r *Reconciler) recordDriverEvent(app *v1beta2.SparkApplication, state v1beta2.DriverState, name string) {
+	switch state {
+	case v1beta2.DriverStatePending:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkDriverPending, "Driver %s is pending", name)
+	case v1beta2.DriverStateRunning:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkDriverRunning, "Driver %s is running", name)
+	case v1beta2.DriverStateCompleted:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkDriverCompleted, "Driver %s completed", name)
+	case v1beta2.DriverStateFailed:
+		r.recorder.Eventf(app, corev1.EventTypeWarning, common.EventSparkDriverFailed, "Driver %s failed", name)
+	case v1beta2.DriverStateUnknown:
+		r.recorder.Eventf(app, corev1.EventTypeWarning, common.EventSparkDriverUnknown, "Driver %s in unknown state", name)
+	}
+}
+
+// recordExecutorEvent records an event for the given executor state; the
+// variadic args are forwarded into the message format string.
+func (r *Reconciler) recordExecutorEvent(app *v1beta2.SparkApplication, state v1beta2.ExecutorState, args ...interface{}) {
+	switch state {
+	case v1beta2.ExecutorStatePending:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkExecutorPending, "Executor %s is pending", args...)
+	case v1beta2.ExecutorStateRunning:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkExecutorRunning, "Executor %s is running", args...)
+	case v1beta2.ExecutorStateCompleted:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkExecutorCompleted, "Executor %s completed", args...)
+	case v1beta2.ExecutorStateFailed:
+		r.recorder.Eventf(app, corev1.EventTypeWarning, common.EventSparkExecutorFailed, "Executor %s failed with ExitCode: %d, Reason: %s", args...)
+	case v1beta2.ExecutorStateUnknown:
+		r.recorder.Eventf(app, corev1.EventTypeWarning, common.EventSparkExecutorUnknown, "Executor %s in unknown state", args...)
+	}
+}
+
+func (r *Reconciler) 
resetSparkApplicationStatus(app *v1beta2.SparkApplication) { + status := &app.Status + switch status.AppState.State { + case v1beta2.ApplicationStateInvalidating: + status.SparkApplicationID = "" + status.SubmissionAttempts = 0 + status.ExecutionAttempts = 0 + status.LastSubmissionAttemptTime = metav1.Time{} + status.TerminationTime = metav1.Time{} + status.AppState.ErrorMessage = "" + status.ExecutorState = nil + case v1beta2.ApplicationStatePendingRerun: + status.SparkApplicationID = "" + status.SubmissionAttempts = 0 + status.LastSubmissionAttemptTime = metav1.Time{} + status.DriverInfo = v1beta2.DriverInfo{} + status.AppState.ErrorMessage = "" + status.ExecutorState = nil + } +} + +func (r *Reconciler) shouldDoBatchScheduling(app *v1beta2.SparkApplication) (bool, scheduler.Interface) { + if r.registry == nil || app.Spec.BatchScheduler == nil || *app.Spec.BatchScheduler == "" { + return false, nil + } + + var err error + var scheduler scheduler.Interface + + schedulerName := *app.Spec.BatchScheduler + switch schedulerName { + case common.VolcanoSchedulerName: + config := &volcano.Config{ + RestConfig: r.manager.GetConfig(), + } + scheduler, err = r.registry.GetScheduler(schedulerName, config) + } + + if err != nil || scheduler == nil { + logger.Error(err, "Failed to get scheduler for SparkApplication", "name", app.Name, "namespace", app.Namespace, "scheduler", schedulerName) + return false, nil + } + return scheduler.ShouldSchedule(app), scheduler +} + +// Clean up when the spark application is terminated. +func (r *Reconciler) cleanUpOnTermination(_, newApp *v1beta2.SparkApplication) error { + if needScheduling, scheduler := r.shouldDoBatchScheduling(newApp); needScheduling { + if err := scheduler.Cleanup(newApp); err != nil { + return err + } + } + return nil +} diff --git a/internal/controller/sparkapplication/controller_test.go b/internal/controller/sparkapplication/controller_test.go new file mode 100644 index 000000000..07e3b0606 --- /dev/null +++ b/internal/controller/sparkapplication/controller_test.go @@ -0,0 +1,290 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication_test + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/controller/sparkapplication" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var _ = Describe("SparkApplication Controller", func() { + Context("When reconciling a new SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + }, + } + v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateCompleted + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting the created test SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + }) + + Context("When reconciling a completed SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + Labels: map[string]string{ + common.LabelSparkAppName: app.Name, + }, + }, + } + v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateCompleted + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting the created test SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("Should successfully reconcile a completed SparkApplication", func() { + By("Reconciling the created test SparkApplication") + reconciler := sparkapplication.NewReconciler( + nil, + k8sClient.Scheme(), + k8sClient, + nil, + nil, + sparkapplication.Options{Namespaces: []string{appNamespace}}, + ) + result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(result.Requeue).To(BeFalse()) + }) + }) + + Context("When reconciling a completed expired SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + }, + } + 
v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateCompleted + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(errors.IsNotFound(k8sClient.Get(ctx, key, app))).To(BeTrue()) + }) + + It("Should delete expired SparkApplication", func() { + By("Set TimeToLiveSeconds and make the SparkApplication expired") + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + app.Spec.TimeToLiveSeconds = util.Int64Ptr(60) + Expect(k8sClient.Update(ctx, app)).To(Succeed()) + app.Status.TerminationTime = metav1.NewTime(time.Now().Add(-2 * time.Minute)) + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + + By("Reconciling the expired SparkApplication") + reconciler := sparkapplication.NewReconciler( + nil, + k8sClient.Scheme(), + k8sClient, + nil, + nil, + sparkapplication.Options{Namespaces: []string{appNamespace}}, + ) + result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(result.Requeue).To(BeFalse()) + }) + }) + + Context("When reconciling a failed SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + }, + } + v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateFailed + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting the created test SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("Should successfully reconcile a failed SparkApplication", func() { + By("Reconciling the created test SparkApplication") + reconciler := sparkapplication.NewReconciler( + nil, + k8sClient.Scheme(), + k8sClient, + nil, + nil, + sparkapplication.Options{Namespaces: []string{appNamespace}}, + ) + result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(result.Requeue).To(BeFalse()) + }) + }) + + Context("When reconciling a failed expired SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + }, + } + v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateFailed + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(errors.IsNotFound(k8sClient.Get(ctx, key, app))).To(BeTrue()) + }) + 
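+
+		// Unlike the non-expired contexts, AfterEach here expects the resource to
+		// be gone already: a TTL-expired application is deleted by the reconciler
+		// itself rather than by the test.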
+ It("Should delete expired SparkApplication", func() { + By("Set TimeToLiveSeconds and make the SparkApplication expired") + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + app.Spec.TimeToLiveSeconds = util.Int64Ptr(60) + Expect(k8sClient.Update(ctx, app)).To(Succeed()) + app.Status.TerminationTime = metav1.NewTime(time.Now().Add(-2 * time.Minute)) + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + + By("Reconciling the expired SparkApplication") + reconciler := sparkapplication.NewReconciler( + nil, + k8sClient.Scheme(), + k8sClient, + nil, + nil, + sparkapplication.Options{Namespaces: []string{appNamespace}}, + ) + result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(result.Requeue).To(BeFalse()) + }) + }) +}) diff --git a/pkg/controller/sparkapplication/driveringress.go b/internal/controller/sparkapplication/driveringress.go similarity index 65% rename from pkg/controller/sparkapplication/driveringress.go rename to internal/controller/sparkapplication/driveringress.go index 08dab3146..982ee8b03 100644 --- a/pkg/controller/sparkapplication/driveringress.go +++ b/internal/controller/sparkapplication/driveringress.go @@ -19,26 +19,25 @@ package sparkapplication import ( "context" "fmt" - "github.com/golang/glog" "net/url" "regexp" - apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - clientset "k8s.io/client-go/kubernetes" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" "github.com/kubeflow/spark-operator/pkg/util" ) // SparkService encapsulates information about the driver UI service. 
type SparkService struct { serviceName string - serviceType apiv1.ServiceType + serviceType corev1.ServiceType servicePort int32 servicePortName string targetPort intstr.IntOrString @@ -56,8 +55,8 @@ type SparkIngress struct { ingressTLS []networkingv1.IngressTLS } -var ingressAppNameURLRegex = regexp.MustCompile("{{\\s*[$]appName\\s*}}") -var ingressAppNamespaceURLRegex = regexp.MustCompile("{{\\s*[$]appNamespace\\s*}}") +var ingressAppNameURLRegex = regexp.MustCompile(`{{\s*[$]appName\s*}}`) +var ingressAppNamespaceURLRegex = regexp.MustCompile(`{{\s*[$]appNamespace\s*}}`) func getDriverIngressURL(ingressURLFormat string, appName string, appNamespace string) (*url.URL, error) { ingressURL := ingressAppNamespaceURLRegex.ReplaceAllString(ingressAppNameURLRegex.ReplaceAllString(ingressURLFormat, appName), appNamespace) @@ -75,21 +74,20 @@ func getDriverIngressURL(ingressURLFormat string, appName string, appNamespace s return parsedURL, nil } -func createDriverIngress(app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, service SparkService, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { +func (r *Reconciler) createDriverIngress(app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, service SparkService, ingressURL *url.URL, ingressClassName string) (*SparkIngress, error) { if driverIngressConfiguration.ServicePort == nil { return nil, fmt.Errorf("cannot create Driver Ingress for application %s/%s due to empty ServicePort on driverIngressConfiguration", app.Namespace, app.Name) } ingressName := fmt.Sprintf("%s-ing-%d", app.Name, *driverIngressConfiguration.ServicePort) if util.IngressCapabilities.Has("networking.k8s.io/v1") { - return createDriverIngress_v1(app, service, ingressName, ingressURL, ingressClassName, kubeClient) - } else { - return createDriverIngress_legacy(app, service, ingressName, ingressURL, kubeClient) + return r.createDriverIngressV1(app, service, ingressName, ingressURL, ingressClassName) } + return r.createDriverIngressLegacy(app, service, ingressName, ingressURL) } -func createDriverIngress_v1(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { - ingressResourceAnnotations := getIngressResourceAnnotations(app) - ingressTlsHosts := getIngressTlsHosts(app) +func (r *Reconciler) createDriverIngressV1(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, ingressClassName string) (*SparkIngress, error) { + ingressResourceAnnotations := util.GetWebUIIngressAnnotations(app) + ingressTLSHosts := util.GetWebUIIngressTLS(app) ingressURLPath := ingressURL.Path // If we're serving on a subpath, we need to ensure we create capture groups @@ -99,12 +97,12 @@ func createDriverIngress_v1(app *v1beta2.SparkApplication, service SparkService, implementationSpecific := networkingv1.PathTypeImplementationSpecific - ingress := networkingv1.Ingress{ + ingress := &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: ingressName, Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + Labels: util.GetResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{util.GetOwnerReference(app)}, }, Spec: networkingv1.IngressSpec{ Rules: []networkingv1.IngressRule{{ @@ -140,53 +138,52 @@ func createDriverIngress_v1(app 
*v1beta2.SparkApplication, service SparkService,
 		}
 		ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2"
 	}
-	if len(ingressTlsHosts) != 0 {
-		ingress.Spec.TLS = ingressTlsHosts
+	if len(ingressTLSHosts) != 0 {
+		ingress.Spec.TLS = ingressTLSHosts
 	}
 	if len(ingressClassName) != 0 {
 		ingress.Spec.IngressClassName = &ingressClassName
 	}
-	glog.Infof("Creating an Ingress %s for the Spark UI for application %s", ingress.Name, app.Name)
-	_, err := kubeClient.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{})
-	if err != nil {
-		return nil, err
+	logger.Info("Creating networking.v1/Ingress for SparkApplication web UI", "name", app.Name, "namespace", app.Namespace, "ingressName", ingress.Name)
+	if err := r.client.Create(context.TODO(), ingress); err != nil {
+		return nil, fmt.Errorf("failed to create ingress %s/%s: %v", ingress.Namespace, ingress.Name, err)
 	}
 	return &SparkIngress{
 		ingressName:      ingress.Name,
 		ingressURL:       ingressURL,
 		ingressClassName: ingressClassName,
 		annotations:      ingress.Annotations,
-		ingressTLS:       ingressTlsHosts,
+		ingressTLS:       ingressTLSHosts,
 	}, nil
 }
 
-func createDriverIngress_legacy(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, kubeClient clientset.Interface) (*SparkIngress, error) {
-	ingressResourceAnnotations := getIngressResourceAnnotations(app)
-	// var ingressTlsHosts networkingv1.IngressTLS[]
-	// That we convert later for extensionsv1beta1, but return as is in SparkIngress
-	ingressTlsHosts := getIngressTlsHosts(app)
+func (r *Reconciler) createDriverIngressLegacy(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL) (*SparkIngress, error) {
+	ingressResourceAnnotations := util.GetWebUIIngressAnnotations(app)
+	// ingressTLSHosts is a []networkingv1.IngressTLS that we convert later for
+	// extensionsv1beta1, but return as is in SparkIngress.
+	ingressTLSHosts := util.GetWebUIIngressTLS(app)
 	ingressURLPath := ingressURL.Path
-	// If we're serving on a subpath, we need to ensure we create capture groups
+	// If we're serving on a subpath, we need to ensure we create capture groups.
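+	// For example, with the URL format "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}"
+	// used in the tests, the path "/default/foo" becomes "/default/foo(/|$)(.*)", and the
+	// "nginx.ingress.kubernetes.io/rewrite-target" annotation of "/$2" applied below rewrites
+	// matched requests so the Spark UI is served relative to its root.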
if ingressURLPath != "" && ingressURLPath != "/" { ingressURLPath = ingressURLPath + "(/|$)(.*)" } - ingress := extensions.Ingress{ + ingress := &extensionsv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: ingressName, Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + Labels: util.GetResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{util.GetOwnerReference(app)}, }, - Spec: extensions.IngressSpec{ - Rules: []extensions.IngressRule{{ + Spec: extensionsv1beta1.IngressSpec{ + Rules: []extensionsv1beta1.IngressRule{{ Host: ingressURL.Host, - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{{ - Backend: extensions.IngressBackend{ + IngressRuleValue: extensionsv1beta1.IngressRuleValue{ + HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ + Paths: []extensionsv1beta1.HTTPIngressPath{{ + Backend: extensionsv1beta1.IngressBackend{ ServiceName: service.serviceName, ServicePort: intstr.IntOrString{ Type: intstr.Int, @@ -212,52 +209,51 @@ func createDriverIngress_legacy(app *v1beta2.SparkApplication, service SparkServ } ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2" } - if len(ingressTlsHosts) != 0 { - ingress.Spec.TLS = convertIngressTlsHostsToLegacy(ingressTlsHosts) + if len(ingressTLSHosts) != 0 { + ingress.Spec.TLS = convertIngressTLSHostsToLegacy(ingressTLSHosts) } - glog.Infof("Creating an extensions/v1beta1 Ingress %s for application %s", ingress.Name, app.Name) - _, err := kubeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) - if err != nil { - return nil, err + logger.Info("Creating extensions.v1beta1/Ingress for SparkApplication web UI", app.Name, "namespace", app.Namespace, "ingressName", ingress.Name) + if err := r.client.Create(context.TODO(), ingress); err != nil { + return nil, fmt.Errorf("failed to create ingress %s/%s: %v", ingress.Namespace, ingress.Name, err) } return &SparkIngress{ ingressName: ingress.Name, ingressURL: ingressURL, annotations: ingress.Annotations, - ingressTLS: ingressTlsHosts, + ingressTLS: ingressTLSHosts, }, nil } -func convertIngressTlsHostsToLegacy(ingressTlsHosts []networkingv1.IngressTLS) []extensions.IngressTLS { - var ingressTlsHosts_legacy []extensions.IngressTLS - for _, ingressTlsHost := range ingressTlsHosts { - ingressTlsHosts_legacy = append(ingressTlsHosts_legacy, extensions.IngressTLS{ - Hosts: ingressTlsHost.Hosts, - SecretName: ingressTlsHost.SecretName, +func convertIngressTLSHostsToLegacy(ingressTLSHosts []networkingv1.IngressTLS) []extensionsv1beta1.IngressTLS { + var ingressTLSHostsLegacy []extensionsv1beta1.IngressTLS + for _, ingressTLSHost := range ingressTLSHosts { + ingressTLSHostsLegacy = append(ingressTLSHostsLegacy, extensionsv1beta1.IngressTLS{ + Hosts: ingressTLSHost.Hosts, + SecretName: ingressTLSHost.SecretName, }) } - return ingressTlsHosts_legacy + return ingressTLSHostsLegacy } -func createDriverIngressService( +func (r *Reconciler) createDriverIngressService( app *v1beta2.SparkApplication, portName string, port int32, targetPort int32, serviceName string, - serviceType apiv1.ServiceType, + serviceType corev1.ServiceType, serviceAnnotations map[string]string, serviceLabels map[string]string, - kubeClient clientset.Interface) (*SparkService, error) { - service := &apiv1.Service{ +) (*SparkService, error) { + service := &corev1.Service{ ObjectMeta: 
metav1.ObjectMeta{ Name: serviceName, Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + Labels: util.GetResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{util.GetOwnerReference(app)}, }, - Spec: apiv1.ServiceSpec{ - Ports: []apiv1.ServicePort{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { Name: portName, Port: port, @@ -268,26 +264,30 @@ func createDriverIngressService( }, }, Selector: map[string]string{ - config.SparkAppNameLabel: app.Name, - config.SparkRoleLabel: config.SparkDriverRole, + common.LabelSparkAppName: app.Name, + common.LabelSparkRole: common.SparkRoleDriver, }, Type: serviceType, }, } + if len(serviceLabels) != 0 { + service.ObjectMeta.Labels = serviceLabels + } + if len(serviceAnnotations) != 0 { service.ObjectMeta.Annotations = serviceAnnotations } - if len(serviceLabels) != 0 { - glog.Infof("Creating a service labels %s for the Driver Ingress: %v", service.Name, &serviceLabels) - service.ObjectMeta.Labels = serviceLabels - } + if err := r.client.Create(context.TODO(), service); err != nil { + if !errors.IsAlreadyExists(err) { + return nil, err + } - glog.Infof("Creating a service %s for the Driver Ingress for application %s", service.Name, app.Name) - service, err := kubeClient.CoreV1().Services(app.Namespace).Create(context.TODO(), service, metav1.CreateOptions{}) - if err != nil { - return nil, err + // Update the service if it already exists. + if err := r.client.Update(context.TODO(), service); err != nil { + return nil, err + } } return &SparkService{ @@ -305,7 +305,7 @@ func createDriverIngressService( func getDriverIngressServicePort(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) (int32, error) { port := driverIngressConfiguration.ServicePort if port == nil { - return 0, fmt.Errorf("servie port is nil on driver ingress configuration") + return 0, fmt.Errorf("service port is nil on driver ingress configuration") } return *port, nil } @@ -326,11 +326,11 @@ func getDriverIngressServiceName(app *v1beta2.SparkApplication, port int32) stri return fmt.Sprintf("%s-driver-%d", app.Name, port) } -func getDriverIngressServiceType(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) apiv1.ServiceType { +func getDriverIngressServiceType(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) corev1.ServiceType { if driverIngressConfiguration.ServiceType != nil { return *driverIngressConfiguration.ServiceType } - return apiv1.ServiceTypeClusterIP + return corev1.ServiceTypeClusterIP } func getDriverIngressServiceAnnotations(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) map[string]string { @@ -353,10 +353,10 @@ func getDriverIngressServiceLabels(driverIngressConfiguration *v1beta2.DriverIng return serviceLabels } -func createDriverIngressServiceFromConfiguration( +func (r *Reconciler) createDriverIngressServiceFromConfiguration( app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, - kubeClient clientset.Interface) (*SparkService, error) { +) (*SparkService, error) { portName := getDriverIngressServicePortName(driverIngressConfiguration) port, err := getDriverIngressServicePort(driverIngressConfiguration) if err != nil { @@ -366,5 +366,5 @@ func createDriverIngressServiceFromConfiguration( serviceType := getDriverIngressServiceType(driverIngressConfiguration) serviceAnnotations := getDriverIngressServiceAnnotations(driverIngressConfiguration) serviceLabels := 
getDriverIngressServiceLabels(driverIngressConfiguration) - return createDriverIngressService(app, portName, port, port, serviceName, serviceType, serviceAnnotations, serviceLabels, kubeClient) + return r.createDriverIngressService(app, portName, port, port, serviceName, serviceType, serviceAnnotations, serviceLabels) } diff --git a/internal/controller/sparkapplication/driveringress_test.go b/internal/controller/sparkapplication/driveringress_test.go new file mode 100644 index 000000000..498ecc330 --- /dev/null +++ b/internal/controller/sparkapplication/driveringress_test.go @@ -0,0 +1,713 @@ +/* +Copyright 2024 spark-operator contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +// func TestCreateDriverIngressService(t *testing.T) { +// type testcase struct { +// name string +// app *v1beta2.SparkApplication +// expectedServices []SparkService +// expectedSelector map[string]string +// expectError bool +// } +// testFn := func(test testcase, t *testing.T) { +// fakeClient := fake.NewSimpleClientset() +// util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} +// if len(test.expectedServices) != len(test.app.Spec.DriverIngressOptions) { +// t.Errorf("%s: size of test.expectedServices (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", +// test.name, len(test.expectedServices), len(test.app.Spec.DriverIngressOptions), test.app.Name) +// } +// for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { +// sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// expectedService := test.expectedServices[i] +// if sparkService.serviceName != expectedService.serviceName { +// t.Errorf("%s: for service name wanted %s got %s", test.name, expectedService.serviceName, sparkService.serviceName) +// } +// service, err := fakeClient.CoreV1(). +// Services(test.app.Namespace). 
+// Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// if service.Labels[common.SparkAppNameLabel] != test.app.Name { +// t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) +// } +// if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { +// t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) +// } +// if service.Spec.Type != expectedService.serviceType { +// t.Errorf("%s: for service type wanted %s got %s", test.name, expectedService.serviceType, service.Spec.Type) +// } +// if len(service.Spec.Ports) != 1 { +// t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) +// } +// port := service.Spec.Ports[0] +// if port.Port != expectedService.servicePort { +// t.Errorf("%s: unexpected port wanted %d got %d", test.name, expectedService.servicePort, port.Port) +// } +// if port.Name != expectedService.servicePortName { +// t.Errorf("%s: unexpected port name wanted %s got %s", test.name, expectedService.servicePortName, port.Name) +// } +// serviceAnnotations := service.ObjectMeta.Annotations +// if !reflect.DeepEqual(serviceAnnotations, expectedService.serviceAnnotations) { +// t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, expectedService.serviceAnnotations, serviceAnnotations) +// } +// serviceLabels := service.ObjectMeta.Labels +// if !reflect.DeepEqual(serviceLabels, expectedService.serviceLabels) { +// t.Errorf("%s: unexpected labels wanted %s got %s", test.name, expectedService.serviceLabels, serviceLabels) +// } +// } +// } +// serviceNameFormat := "%s-driver-%d" +// portNameFormat := "driver-ing-%d" +// app1 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo1", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// ExecutionAttempts: 1, +// }, +// } +// app2 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo2", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-2", +// ExecutionAttempts: 2, +// }, +// } +// app3 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo3", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: nil, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-3", +// }, +// } +// var appPort int32 = 80 +// app4 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo4", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// }, +// }, +// SparkConf: map[string]string{ +// sparkUIPortConfigurationKey: "4041", +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-3", +// }, +// } +// var 
serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort +// app5 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo5", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// ServiceType: &serviceTypeNodePort, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-2", +// ExecutionAttempts: 2, +// }, +// } +// appPortName := "http-spark-test" +// app6 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo6", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// ServicePortName: &appPortName, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-6", +// }, +// } +// app7 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo7", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// ServiceAnnotations: map[string]string{ +// "key": "value", +// }, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-7", +// ExecutionAttempts: 1, +// }, +// } +// app8 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo8", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// ServiceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo8", +// "key": "value", +// }, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-8", +// ExecutionAttempts: 1, +// }, +// } +// testcases := []testcase{ +// { +// name: "service with custom serviceport and serviceport and target port are same", +// app: app1, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app1.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: *app1.Spec.DriverIngressOptions[0].ServicePort, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo1", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(*app1.Spec.DriverIngressOptions[0].ServicePort), +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo1", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with default port", +// app: app2, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app2.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: int32(*app2.Spec.DriverIngressOptions[0].ServicePort), +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo2", +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: 
"foo2", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom serviceport and serviceport and target port are different", +// app: app4, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app4.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: 80, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo4", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo4", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom servicetype", +// app: app5, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app5.GetName(), *app5.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeNodePort, +// servicePortName: fmt.Sprintf(portNameFormat, *app5.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: *app5.Spec.DriverIngressOptions[0].ServicePort, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo5", +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo5", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom serviceportname", +// app: app6, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app6.GetName(), *app6.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: "http-spark-test", +// servicePort: int32(80), +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo6", +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo6", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with annotation", +// app: app7, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app7.GetName(), *app7.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app7.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: *app7.Spec.DriverIngressOptions[0].ServicePort, +// serviceAnnotations: map[string]string{ +// "key": "value", +// }, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo7", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo7", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom labels", +// app: app8, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app8.GetName(), *app8.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app8.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: *app8.Spec.DriverIngressOptions[0].ServicePort, +// serviceLabels: map[string]string{ +// 
"sparkoperator.k8s.io/app-name": "foo8", +// "key": "value", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo8", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with bad port configurations", +// app: app3, +// expectError: true, +// expectedServices: []SparkService{{}}, +// }, +// } +// for _, test := range testcases { +// testFn(test, t) +// } +// } + +// func TestCreateDriverIngress(t *testing.T) { +// type testcase struct { +// name string +// app *v1beta2.SparkApplication +// expectedIngresses []SparkIngress +// expectError bool +// } + +// testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { +// fakeClient := fake.NewSimpleClientset() +// if len(test.expectedIngresses) != len(test.app.Spec.DriverIngressOptions) { +// t.Errorf("%s: size of test.expectedIngresses (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", +// test.name, len(test.expectedIngresses), len(test.app.Spec.DriverIngressOptions), test.app.Name) +// } +// for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { +// sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) +// if err != nil { +// t.Fatal(err) +// } +// ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) +// if err != nil { +// t.Fatal(err) +// } +// sparkIngress, err := createDriverIngress(test.app, &driverIngressConfiguration, *sparkService, ingressURL, ingressClassName, fakeClient) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// expectedIngress := test.expectedIngresses[i] +// if sparkIngress.ingressName != expectedIngress.ingressName { +// t.Errorf("Ingress name wanted %s got %s", expectedIngress.ingressName, sparkIngress.ingressName) +// } +// if sparkIngress.ingressURL.String() != expectedIngress.ingressURL.String() { +// t.Errorf("Ingress URL wanted %s got %s", expectedIngress.ingressURL, sparkIngress.ingressURL) +// } +// ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). 
+// Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) +// if err != nil { +// t.Fatal(err) +// } +// if len(ingress.Annotations) != 0 { +// for key, value := range ingress.Annotations { +// if expectedIngress.annotations[key] != ingress.Annotations[key] { +// t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) +// } +// } +// } +// if len(ingress.Spec.TLS) != 0 { +// for _, ingressTls := range ingress.Spec.TLS { +// if ingressTls.Hosts[0] != expectedIngress.ingressTLS[0].Hosts[0] { +// t.Errorf("Expected ingressTls host: %s but found : %s", expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) +// } +// if ingressTls.SecretName != expectedIngress.ingressTLS[0].SecretName { +// t.Errorf("Expected ingressTls secretName: %s but found : %s", expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) +// } +// } +// } +// if ingress.Labels[common.SparkAppNameLabel] != test.app.Name { +// t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) +// } + +// if len(ingress.Spec.Rules) != 1 { +// t.Errorf("No Ingress rules found.") +// } +// ingressRule := ingress.Spec.Rules[0] +// // If we have a path, then the ingress adds capture groups +// if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { +// expectedIngress.ingressURL.Path = expectedIngress.ingressURL.Path + "(/|$)(.*)" +// } +// if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path { +// t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path) +// } + +// if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { +// t.Errorf("No Ingress paths found.") +// } +// ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] +// if ingressPath.Backend.Service.Name != sparkService.serviceName { +// t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) +// } +// if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { +// t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) +// } +// if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { +// t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) +// } +// } +// } + +// ingressNameFormat := "%s-ing-%d" +// var appPort int32 = 80 +// app1 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app2 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// }, +// }, +// }, +// Status: 
v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app3 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// IngressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, +// }, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app4 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// }, +// IngressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: ""}, +// }, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } + +// testcases := []testcase{ +// { +// name: "simple ingress object", +// app: app1, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with annotations and without tls configuration", +// app: app2, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with annotations and tls configuration", +// app: app3, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app3.GetName(), *app3.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// ingressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, +// }, +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with incomplete list of annotations", +// app: app4, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// 
"nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// ingressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: ""}, +// }, +// }, +// }, +// expectError: true, +// }, +// } + +// for _, test := range testcases { +// testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") +// } + +// testcases = []testcase{ +// { +// name: "simple ingress object with ingress URL Format with path", +// app: app1, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), +// annotations: map[string]string{ +// "nginx.ingress.kubernetes.io/rewrite-target": "/$2", +// }, +// }, +// }, +// expectError: false, +// }, +// } + +// for _, test := range testcases { +// testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") +// } + +// testcases = []testcase{ +// { +// name: "simple ingress object with ingressClassName set", +// app: app1, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), +// ingressClassName: "nginx", +// }, +// }, +// expectError: false, +// }, +// } +// for _, test := range testcases { +// testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") +// } +// } diff --git a/internal/controller/sparkapplication/event_filter.go b/internal/controller/sparkapplication/event_filter.go new file mode 100644 index 000000000..3fe49ee13 --- /dev/null +++ b/internal/controller/sparkapplication/event_filter.go @@ -0,0 +1,207 @@ +/* +Copyright 2024 The kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/util" +) + +// sparkPodEventFilter filters Spark pod events. +type sparkPodEventFilter struct { + namespaces map[string]bool +} + +// sparkPodEventFilter implements the predicate.Predicate interface. +var _ predicate.Predicate = &sparkPodEventFilter{} + +// newSparkPodEventFilter creates a new SparkPodEventFilter instance. +func newSparkPodEventFilter(namespaces []string) *sparkPodEventFilter { + nsMap := make(map[string]bool) + for _, ns := range namespaces { + nsMap[ns] = true + } + + return &sparkPodEventFilter{ + namespaces: nsMap, + } +} + +// Create implements predicate.Predicate. 
+func (f *sparkPodEventFilter) Create(e event.CreateEvent) bool {
+	pod, ok := e.Object.(*corev1.Pod)
+	if !ok {
+		return false
+	}
+
+	return f.filter(pod)
+}
+
+// Update implements predicate.Predicate.
+func (f *sparkPodEventFilter) Update(e event.UpdateEvent) bool {
+	oldPod, ok := e.ObjectOld.(*corev1.Pod)
+	if !ok {
+		return false
+	}
+
+	newPod, ok := e.ObjectNew.(*corev1.Pod)
+	if !ok {
+		return false
+	}
+
+	if newPod.Status.Phase == oldPod.Status.Phase {
+		return false
+	}
+
+	return f.filter(newPod)
+}
+
+// Delete implements predicate.Predicate.
+func (f *sparkPodEventFilter) Delete(e event.DeleteEvent) bool {
+	pod, ok := e.Object.(*corev1.Pod)
+	if !ok {
+		return false
+	}
+
+	return f.filter(pod)
+}
+
+// Generic implements predicate.Predicate.
+func (f *sparkPodEventFilter) Generic(e event.GenericEvent) bool {
+	pod, ok := e.Object.(*corev1.Pod)
+	if !ok {
+		return false
+	}
+
+	return f.filter(pod)
+}
+
+func (f *sparkPodEventFilter) filter(pod *corev1.Pod) bool {
+	if !util.IsLaunchedBySparkOperator(pod) {
+		return false
+	}
+
+	return f.namespaces[metav1.NamespaceAll] || f.namespaces[pod.Namespace]
+}
+
+type EventFilter struct {
+	client     client.Client
+	recorder   record.EventRecorder
+	namespaces map[string]bool
+}
+
+var _ predicate.Predicate = &EventFilter{}
+
+func NewSparkApplicationEventFilter(client client.Client, recorder record.EventRecorder, namespaces []string) *EventFilter {
+	nsMap := make(map[string]bool)
+	for _, ns := range namespaces {
+		nsMap[ns] = true
+	}
+
+	return &EventFilter{
+		client:     client,
+		recorder:   recorder,
+		namespaces: nsMap,
+	}
+}
+
+// Create implements predicate.Predicate.
+func (f *EventFilter) Create(e event.CreateEvent) bool {
+	app, ok := e.Object.(*v1beta2.SparkApplication)
+	if !ok {
+		return false
+	}
+
+	return f.filter(app)
+}
+
+// Update implements predicate.Predicate.
+func (f *EventFilter) Update(e event.UpdateEvent) bool {
+	oldApp, ok := e.ObjectOld.(*v1beta2.SparkApplication)
+	if !ok {
+		return false
+	}
+
+	newApp, ok := e.ObjectNew.(*v1beta2.SparkApplication)
+	if !ok {
+		return false
+	}
+
+	if !f.filter(newApp) {
+		return false
+	}
+
+	if oldApp.ResourceVersion == newApp.ResourceVersion && !util.IsExpired(newApp) && !util.ShouldRetry(newApp) {
+		return false
+	}
+
+	// The spec has changed. This is currently best effort as we can potentially miss updates
+	// and end up in an inconsistent state.
+	if !equality.Semantic.DeepEqual(oldApp.Spec, newApp.Spec) {
+		// Force-set the application status to Invalidating which handles clean-up and application re-run.
+		newApp.Status.AppState.State = v1beta2.ApplicationStateInvalidating
+		logger.Info("Updating SparkApplication status", "name", newApp.Name, "namespace", newApp.Namespace, "oldState", oldApp.Status.AppState.State, "newState", newApp.Status.AppState.State)
+		if err := f.client.Status().Update(context.TODO(), newApp); err != nil {
+			logger.Error(err, "Failed to update application status", "application", newApp.Name)
+			f.recorder.Eventf(
+				newApp,
+				corev1.EventTypeWarning,
+				"SparkApplicationSpecUpdateFailed",
+				"Failed to update spec for SparkApplication %s: %v",
+				newApp.Name,
+				err,
+			)
+			return false
+		}
+	}
+
+	return true
+}
+
+// Delete implements predicate.Predicate.
+func (f *EventFilter) Delete(e event.DeleteEvent) bool {
+	app, ok := e.Object.(*v1beta2.SparkApplication)
+	if !ok {
+		return false
+	}
+
+	return f.filter(app)
+}
+
+// Generic implements predicate.Predicate.
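+// As with Create and Delete, generic SparkApplication events are admitted
+// purely on namespace membership; no status inspection is performed here.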
+func (f *EventFilter) Generic(e event.GenericEvent) bool {
+	app, ok := e.Object.(*v1beta2.SparkApplication)
+	if !ok {
+		return false
+	}
+
+	return f.filter(app)
+}
+
+func (f *EventFilter) filter(app *v1beta2.SparkApplication) bool {
+	return f.namespaces[metav1.NamespaceAll] || f.namespaces[app.Namespace]
+}
diff --git a/internal/controller/sparkapplication/event_handler.go b/internal/controller/sparkapplication/event_handler.go
new file mode 100644
index 000000000..0e2ee5896
--- /dev/null
+++ b/internal/controller/sparkapplication/event_handler.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2024 The kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sparkapplication
+
+import (
+	"context"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/workqueue"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/internal/metrics"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+// SparkPodEventHandler watches Spark pods and updates the SparkApplication objects accordingly.
+type SparkPodEventHandler struct {
+	client  client.Client
+	metrics *metrics.SparkExecutorMetrics
+}
+
+// SparkPodEventHandler implements handler.EventHandler.
+var _ handler.EventHandler = &SparkPodEventHandler{}
+
+// NewSparkPodEventHandler creates a new SparkPodEventHandler instance.
+func NewSparkPodEventHandler(client client.Client, metrics *metrics.SparkExecutorMetrics) *SparkPodEventHandler {
+	handler := &SparkPodEventHandler{
+		client:  client,
+		metrics: metrics,
+	}
+	return handler
+}
+
+// Create implements handler.EventHandler.
+func (h *SparkPodEventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) {
+	pod, ok := event.Object.(*corev1.Pod)
+	if !ok {
+		return
+	}
+	logger.Info("Spark pod created", "name", pod.Name, "namespace", pod.Namespace, "phase", pod.Status.Phase)
+	h.enqueueSparkAppForUpdate(ctx, pod, queue)
+
+	if h.metrics != nil && util.IsExecutorPod(pod) {
+		h.metrics.HandleSparkExecutorCreate(pod)
+	}
+}
+
+// Update implements handler.EventHandler.
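+// Pod updates are dropped unless the pod phase changed, mirroring the
+// sparkPodEventFilter predicate and avoiding no-op reconciles on resyncs.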
+func (h *SparkPodEventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) {
+	oldPod, ok := event.ObjectOld.(*corev1.Pod)
+	if !ok {
+		return
+	}
+
+	newPod, ok := event.ObjectNew.(*corev1.Pod)
+	if !ok {
+		return
+	}
+
+	if newPod.Status.Phase == oldPod.Status.Phase {
+		return
+	}
+
+	logger.Info("Spark pod updated", "name", newPod.Name, "namespace", newPod.Namespace, "oldPhase", oldPod.Status.Phase, "newPhase", newPod.Status.Phase)
+	h.enqueueSparkAppForUpdate(ctx, newPod, queue)
+
+	if h.metrics != nil && util.IsExecutorPod(oldPod) && util.IsExecutorPod(newPod) {
+		h.metrics.HandleSparkExecutorUpdate(oldPod, newPod)
+	}
+}
+
+// Delete implements handler.EventHandler.
+func (h *SparkPodEventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) {
+	pod, ok := event.Object.(*corev1.Pod)
+	if !ok {
+		return
+	}
+
+	logger.Info("Spark pod deleted", "name", pod.Name, "namespace", pod.Namespace, "phase", pod.Status.Phase)
+	h.enqueueSparkAppForUpdate(ctx, pod, queue)
+
+	if h.metrics != nil && util.IsExecutorPod(pod) {
+		h.metrics.HandleSparkExecutorDelete(pod)
+	}
+}
+
+// Generic implements handler.EventHandler.
+func (h *SparkPodEventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) {
+	pod, ok := event.Object.(*corev1.Pod)
+	if !ok {
+		return
+	}
+
+	logger.Info("Spark pod generic event", "name", pod.Name, "namespace", pod.Namespace, "phase", pod.Status.Phase)
+	h.enqueueSparkAppForUpdate(ctx, pod, queue)
+}
+
+func (h *SparkPodEventHandler) enqueueSparkAppForUpdate(ctx context.Context, pod *corev1.Pod, queue workqueue.RateLimitingInterface) {
+	name := util.GetAppName(pod)
+	if name == "" {
+		return
+	}
+	namespace := pod.Namespace
+	key := types.NamespacedName{
+		Namespace: namespace,
+		Name:      name,
+	}
+
+	app := &v1beta2.SparkApplication{}
+	if submissionID, ok := pod.Labels[common.LabelSubmissionID]; ok {
+		if err := h.client.Get(ctx, key, app); err != nil {
+			return
+		}
+		if app.Status.SubmissionID != submissionID {
+			return
+		}
+	}
+
+	// Do not enqueue a SparkApplication in invalidating state when the driver pod gets deleted.
+	if util.GetApplicationState(app) == v1beta2.ApplicationStateInvalidating {
+		return
+	}
+
+	queue.AddRateLimited(ctrl.Request{NamespacedName: key})
+}
+
+// EventHandler watches SparkApplication events.
+type EventHandler struct {
+	metrics *metrics.SparkApplicationMetrics
+}
+
+var _ handler.EventHandler = &EventHandler{}
+
+// NewSparkApplicationEventHandler creates a new EventHandler instance.
+func NewSparkApplicationEventHandler(metrics *metrics.SparkApplicationMetrics) *EventHandler {
+	return &EventHandler{
+		metrics: metrics,
+	}
+}
+
+// Create implements handler.EventHandler.
+func (h *EventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) {
+	app, ok := event.Object.(*v1beta2.SparkApplication)
+	if !ok {
+		return
+	}
+
+	logger.Info("SparkApplication created", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+	queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}})
+
+	if h.metrics != nil {
+		h.metrics.HandleSparkApplicationCreate(app)
+	}
+}
+
+// Update implements handler.EventHandler.
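+// Every update that reaches this handler is enqueued; filtering of irrelevant
+// or unchanged updates is the job of the EventFilter predicate, not the handler.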
+func (h *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { + oldApp, ok := event.ObjectOld.(*v1beta2.SparkApplication) + if !ok { + return + } + + newApp, ok := event.ObjectNew.(*v1beta2.SparkApplication) + if !ok { + return + } + + logger.Info("SparkApplication updated", "name", oldApp.Name, "namespace", oldApp.Namespace, "oldState", oldApp.Status.AppState.State, "newState", newApp.Status.AppState.State) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: newApp.Name, Namespace: newApp.Namespace}}) + + if h.metrics != nil { + h.metrics.HandleSparkApplicationUpdate(oldApp, newApp) + } +} + +// Delete implements handler.EventHandler. +func (h *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.SparkApplication) + if !ok { + return + } + + logger.Info("SparkApplication deleted", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) + + if h.metrics != nil { + h.metrics.HandleSparkApplicationDelete(app) + } +} + +// Generic implements handler.EventHandler. +func (h *EventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.SparkApplication) + if !ok { + return + } + + logger.Info("SparkApplication generic event", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) +} diff --git a/pkg/controller/sparkapplication/monitoring_config.go b/internal/controller/sparkapplication/monitoring_config.go similarity index 55% rename from pkg/controller/sparkapplication/monitoring_config.go rename to internal/controller/sparkapplication/monitoring_config.go index ea88326b1..a4ef7b454 100644 --- a/pkg/controller/sparkapplication/monitoring_config.go +++ b/internal/controller/sparkapplication/monitoring_config.go @@ -22,51 +22,42 @@ import ( "github.com/golang/glog" corev1 "k8s.io/api/core/v1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" ) -const ( - metricsPropertiesKey = "metrics.properties" - prometheusConfigKey = "prometheus.yaml" - prometheusScrapeAnnotation = "prometheus.io/scrape" - prometheusPortAnnotation = "prometheus.io/port" - prometheusPathAnnotation = "prometheus.io/path" -) - -func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient clientset.Interface) error { - port := config.DefaultPrometheusJavaAgentPort +func configPrometheusMonitoring(app *v1beta2.SparkApplication, client client.Client) error { + port := common.DefaultPrometheusJavaAgentPort if app.Spec.Monitoring.Prometheus.Port != nil { port = *app.Spec.Monitoring.Prometheus.Port } // If one or both of the metricsPropertiesFile and Prometheus.ConfigFile are not set 
-	if !app.HasMetricsPropertiesFile() || !app.HasPrometheusConfigFile() {
-		glog.V(2).Infof("Creating a ConfigMap for metrics and Prometheus configurations.")
-		configMapName := config.GetPrometheusConfigMapName(app)
+	if !util.HasMetricsPropertiesFile(app) || !util.HasPrometheusConfigFile(app) {
+		logger.V(1).Info("Creating a ConfigMap for metrics and Prometheus configurations")
+		configMapName := util.GetPrometheusConfigMapName(app)
 		configMap := buildPrometheusConfigMap(app, configMapName)
-		retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-			cm, err := kubeClient.CoreV1().ConfigMaps(app.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
-			if apiErrors.IsNotFound(err) {
-				_, createErr := kubeClient.CoreV1().ConfigMaps(app.Namespace).Create(context.TODO(), configMap, metav1.CreateOptions{})
-				return createErr
-			}
-			if err != nil {
+		key := types.NamespacedName{Namespace: configMap.Namespace, Name: configMap.Name}
+		if retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+			cm := &corev1.ConfigMap{}
+			if err := client.Get(context.TODO(), key, cm); err != nil {
+				if errors.IsNotFound(err) {
+					return client.Create(context.TODO(), configMap)
+				}
 				return err
 			}
-			cm.Data = configMap.Data
-			_, updateErr := kubeClient.CoreV1().ConfigMaps(app.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
-			return updateErr
-		})
-
-		if retryErr != nil {
-			return fmt.Errorf("failed to apply %s in namespace %s: %v", configMapName, app.Namespace, retryErr)
+			cm.Data = configMap.Data
+			return client.Update(context.TODO(), cm)
+		}); retryErr != nil {
+			logger.Error(retryErr, "Failed to create/update Prometheus ConfigMap for SparkApplication", "name", app.Name, "configMapName", configMap.Name, "namespace", app.Namespace)
+			return retryErr
 		}
 	}
@@ -76,10 +67,10 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client
 		"-javaagent:%s=%d:%s/%s",
 		app.Spec.Monitoring.Prometheus.JmxExporterJar,
 		port,
-		config.PrometheusConfigMapMountPath,
-		prometheusConfigKey)
+		common.PrometheusConfigMapMountPath,
+		common.PrometheusConfigKey)
 
-	if app.HasPrometheusConfigFile() {
+	if util.HasPrometheusConfigFile(app) {
 		configFile := *app.Spec.Monitoring.Prometheus.ConfigFile
 		glog.V(2).Infof("Overriding the default Prometheus configuration with config file %s in the Spark image.", configFile)
 		javaOption = fmt.Sprintf("-javaagent:%s=%d:%s", app.Spec.Monitoring.Prometheus.JmxExporterJar,
@@ -88,14 +79,14 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client
 
 	/* work around for push gateway issue: https://github.com/prometheus/pushgateway/issues/97 */
 	metricNamespace := fmt.Sprintf("%s.%s", app.Namespace, app.Name)
-	metricConf := fmt.Sprintf("%s/%s", config.PrometheusConfigMapMountPath, metricsPropertiesKey)
+	metricConf := fmt.Sprintf("%s/%s", common.PrometheusConfigMapMountPath, common.MetricsPropertiesKey)
 	if app.Spec.SparkConf == nil {
 		app.Spec.SparkConf = make(map[string]string)
 	}
 	app.Spec.SparkConf["spark.metrics.namespace"] = metricNamespace
 	app.Spec.SparkConf["spark.metrics.conf"] = metricConf
 
-	if app.HasMetricsPropertiesFile() {
+	if util.HasMetricsPropertiesFile(app) {
 		app.Spec.SparkConf["spark.metrics.conf"] = *app.Spec.Monitoring.MetricsPropertiesFile
 	}
 
@@ -103,9 +94,9 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client
 	if app.Spec.Driver.Annotations == nil {
 		app.Spec.Driver.Annotations = make(map[string]string)
 	}
-	app.Spec.Driver.Annotations[prometheusScrapeAnnotation] = "true"
-	app.Spec.Driver.Annotations[prometheusPortAnnotation] = fmt.Sprintf("%d", port)
-	app.Spec.Driver.Annotations[prometheusPathAnnotation] = "/metrics"
+	app.Spec.Driver.Annotations[common.PrometheusScrapeAnnotation] = "true"
+	app.Spec.Driver.Annotations[common.PrometheusPortAnnotation] = fmt.Sprintf("%d", port)
+	app.Spec.Driver.Annotations[common.PrometheusPathAnnotation] = "/metrics"
 
 	if app.Spec.Driver.JavaOptions == nil {
 		app.Spec.Driver.JavaOptions = &javaOption
@@ -117,9 +108,9 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client
 	if app.Spec.Executor.Annotations == nil {
 		app.Spec.Executor.Annotations = make(map[string]string)
 	}
-	app.Spec.Executor.Annotations[prometheusScrapeAnnotation] = "true"
-	app.Spec.Executor.Annotations[prometheusPortAnnotation] = fmt.Sprintf("%d", port)
-	app.Spec.Executor.Annotations[prometheusPathAnnotation] = "/metrics"
+	app.Spec.Executor.Annotations[common.PrometheusScrapeAnnotation] = "true"
+	app.Spec.Executor.Annotations[common.PrometheusPortAnnotation] = fmt.Sprintf("%d", port)
+	app.Spec.Executor.Annotations[common.PrometheusPathAnnotation] = "/metrics"
 
 	if app.Spec.Executor.JavaOptions == nil {
 		app.Spec.Executor.JavaOptions = &javaOption
@@ -134,27 +125,27 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client
 func buildPrometheusConfigMap(app *v1beta2.SparkApplication, prometheusConfigMapName string) *corev1.ConfigMap {
 	configMapData := make(map[string]string)
 
-	if !app.HasMetricsPropertiesFile() {
-		metricsProperties := config.DefaultMetricsProperties
+	if !util.HasMetricsPropertiesFile(app) {
+		metricsProperties := common.DefaultMetricsProperties
 		if app.Spec.Monitoring.MetricsProperties != nil {
 			metricsProperties = *app.Spec.Monitoring.MetricsProperties
 		}
-		configMapData[metricsPropertiesKey] = metricsProperties
+		configMapData[common.MetricsPropertiesKey] = metricsProperties
 	}
 
-	if !app.HasPrometheusConfigFile() {
-		prometheusConfig := config.DefaultPrometheusConfiguration
+	if !util.HasPrometheusConfigFile(app) {
+		prometheusConfig := common.DefaultPrometheusConfiguration
 		if app.Spec.Monitoring.Prometheus.Configuration != nil {
 			prometheusConfig = *app.Spec.Monitoring.Prometheus.Configuration
 		}
-		configMapData[prometheusConfigKey] = prometheusConfig
+		configMapData[common.PrometheusConfigKey] = prometheusConfig
 	}
 
 	return &corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            prometheusConfigMapName,
 			Namespace:       app.Namespace,
-			OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)},
+			OwnerReferences: []metav1.OwnerReference{util.GetOwnerReference(app)},
 		},
 		Data: configMapData,
 	}
diff --git a/internal/controller/sparkapplication/monitoring_config_test.go b/internal/controller/sparkapplication/monitoring_config_test.go
new file mode 100644
index 000000000..2b83bb141
--- /dev/null
+++ b/internal/controller/sparkapplication/monitoring_config_test.go
@@ -0,0 +1,255 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sparkapplication_test
+
+// func TestConfigPrometheusMonitoring(t *testing.T) {
+// 	type testcase struct {
+// 		app                   *v1beta2.SparkApplication
+// 		metricsProperties     string
+// 		metricsPropertiesFile string
+// 		prometheusConfig      string
+// 		port                  string
+// 		driverJavaOptions     string
+// 		executorJavaOptions   string
+// 	}
+
+// 	fakeClient := fake.NewSimpleClientset()
+// 	testFn := func(test testcase, t *testing.T) {
+// 		err := configPrometheusMonitoring(test.app, fakeClient)
+// 		if err != nil {
+// 			t.Errorf("failed to configure Prometheus monitoring: %v", err)
+// 		}
+
+// 		configMapName := test.app.GetPrometheusConfigMapName()
+// 		configMap, err := fakeClient.CoreV1().ConfigMaps(test.app.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{})
+// 		if err != nil {
+// 			t.Errorf("failed to get ConfigMap %s: %v", configMapName, err)
+// 		}
+
+// 		if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil &&
+// 			test.app.Spec.Monitoring.MetricsPropertiesFile == nil &&
+// 			len(configMap.Data) != 2 {
+// 			t.Errorf("expected %d data items got %d", 2, len(configMap.Data))
+// 		}
+
+// 		if test.app.Spec.Monitoring.Prometheus.ConfigFile != nil &&
+// 			test.app.Spec.Monitoring.MetricsPropertiesFile == nil &&
+// 			len(configMap.Data) != 1 {
+// 			t.Errorf("expected %d data items got %d", 1, len(configMap.Data))
+// 		}
+
+// 		if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil &&
+// 			test.app.Spec.Monitoring.MetricsPropertiesFile != nil &&
+// 			len(configMap.Data) != 1 {
+// 			t.Errorf("expected %d data items got %d", 1, len(configMap.Data))
+// 		}
+
+// 		if test.app.Spec.Monitoring.MetricsPropertiesFile == nil && configMap.Data[common.MetricsPropertiesKey] != test.metricsProperties {
+// 			t.Errorf("metrics.properties expected %s got %s", test.metricsProperties, configMap.Data[common.MetricsPropertiesKey])
+// 		}
+
+// 		if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && configMap.Data[common.PrometheusConfigKey] != test.prometheusConfig {
+// 			t.Errorf("prometheus.yaml expected %s got %s", test.prometheusConfig, configMap.Data[common.PrometheusConfigKey])
+// 		}
+
+// 		if test.app.Spec.Monitoring.ExposeDriverMetrics {
+// 			if len(test.app.Spec.Driver.Annotations) != 3 {
+// 				t.Errorf("expected %d driver annotations got %d", 3, len(test.app.Spec.Driver.Annotations))
+// 			}
+// 			if test.app.Spec.Driver.Annotations[common.PrometheusPortAnnotation] != test.port {
+// 				t.Errorf("java agent port expected %s got %s", test.port, test.app.Spec.Driver.Annotations[common.PrometheusPortAnnotation])
+// 			}
+
+// 			if *test.app.Spec.Driver.JavaOptions != test.driverJavaOptions {
+// 				t.Errorf("driver Java options expected %s got %s", test.driverJavaOptions, *test.app.Spec.Driver.JavaOptions)
+// 			}
+// 		}
+
+// 		if test.app.Spec.Monitoring.ExposeExecutorMetrics {
+// 			if len(test.app.Spec.Executor.Annotations) != 3 {
+// 				t.Errorf("expected %d executor annotations got %d", 3, len(test.app.Spec.Executor.Annotations))
+// 			}
+// 			if test.app.Spec.Executor.Annotations[common.PrometheusPortAnnotation] != test.port {
+// 				t.Errorf("java agent port expected %s got %s", test.port, test.app.Spec.Executor.Annotations[common.PrometheusPortAnnotation])
+// 			}
+
+// 			if *test.app.Spec.Executor.JavaOptions != test.executorJavaOptions {
+// 				t.Errorf("executor Java options expected %s got %s",
test.executorJavaOptions, *test.app.Spec.Executor.JavaOptions) +// } +// } + +// if test.app.Spec.Monitoring.MetricsPropertiesFile != nil { +// if test.app.Spec.SparkConf["spark.metrics.conf"] != test.metricsPropertiesFile { +// t.Errorf("expected sparkConf %s got %s", test.metricsPropertiesFile, test.app.Spec.SparkConf["spark.metrics.conf"]) +// } +// } +// } + +// testcases := []testcase{ +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app1", +// Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// }, +// }, +// }, +// }, +// metricsProperties: common.DefaultMetricsProperties, +// prometheusConfig: common.DefaultPrometheusConfiguration, +// port: fmt.Sprintf("%d", common.DefaultPrometheusJavaAgentPort), +// driverJavaOptions: "-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", +// executorJavaOptions: "-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", +// }, +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app2", +// Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Executor: v1beta2.ExecutorSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// MetricsProperties: util.StringPtr("testcase2dummy"), +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// Port: util.Int32Ptr(8091), +// Configuration: util.StringPtr("testcase2dummy"), +// }, +// }, +// }, +// }, +// metricsProperties: "testcase2dummy", +// prometheusConfig: "testcase2dummy", +// port: "8091", +// driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", +// executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", +// }, +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app2", +// Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Executor: v1beta2.ExecutorSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// MetricsProperties: util.StringPtr("testcase3dummy"), +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// Port: util.Int32Ptr(8091), +// ConfigFile: util.StringPtr("testcase3dummy.yaml"), +// }, +// }, +// }, +// }, +// metricsProperties: "testcase3dummy", +// port: "8091", +// driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase3dummy.yaml", +// executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase3dummy.yaml", +// }, +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app2", +// 
Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Executor: v1beta2.ExecutorSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// MetricsPropertiesFile: util.StringPtr("/testcase4dummy/metrics.properties"), +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// Port: util.Int32Ptr(8091), +// ConfigFile: util.StringPtr("testcase4dummy.yaml"), +// }, +// }, +// }, +// }, +// metricsPropertiesFile: "/testcase4dummy/metrics.properties", +// port: "8091", +// driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase4dummy.yaml", +// executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase4dummy.yaml", +// }, +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app2", +// Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Executor: v1beta2.ExecutorSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// MetricsPropertiesFile: util.StringPtr("/testcase5dummy/metrics.properties"), +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// Port: util.Int32Ptr(8091), +// }, +// }, +// }, +// }, +// metricsPropertiesFile: "/testcase5dummy/metrics.properties", +// prometheusConfig: common.DefaultPrometheusConfiguration, +// port: "8091", +// driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", +// executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", +// }, +// } + +// for _, test := range testcases { +// testFn(test, t) +// } +// } diff --git a/internal/controller/sparkapplication/submission.go b/internal/controller/sparkapplication/submission.go new file mode 100644 index 000000000..318bb6e4f --- /dev/null +++ b/internal/controller/sparkapplication/submission.go @@ -0,0 +1,1023 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +// submission includes information of a Spark application to be submitted. 
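+// The args slice is handed to spark-submit verbatim; illustratively, a
+// cluster-mode submission renders roughly as:
+//
+//	spark-submit --master k8s://https://<api-server>:<port> --deploy-mode cluster \
+//		--name <app-name> --conf spark.kubernetes.namespace=<namespace> ... <main-application-file> [arguments]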
+type submission struct {
+	namespace string
+	name      string
+	args      []string
+}
+
+func newSubmission(args []string, app *v1beta2.SparkApplication) *submission {
+	return &submission{
+		namespace: app.Namespace,
+		name:      app.Name,
+		args:      args,
+	}
+}
+
+// runSparkSubmit runs spark-submit from $SPARK_HOME with the submission
+// arguments and surfaces spark-submit's stderr when the command fails.
+func runSparkSubmit(submission *submission) (bool, error) {
+	sparkHome, present := os.LookupEnv(common.EnvSparkHome)
+	if !present {
+		return false, fmt.Errorf("env %s is not specified", common.EnvSparkHome)
+	}
+	command := filepath.Join(sparkHome, "bin", "spark-submit")
+	cmd := exec.Command(command, submission.args...)
+	_, err := cmd.Output()
+	if err != nil {
+		var errorMsg string
+		if exitErr, ok := err.(*exec.ExitError); ok {
+			errorMsg = string(exitErr.Stderr)
+		}
+		// The driver pod of the application already exists.
+		if strings.Contains(errorMsg, common.ErrorCodePodAlreadyExists) {
+			return false, fmt.Errorf("driver pod already exists")
+		}
+		if errorMsg != "" {
+			return false, fmt.Errorf("failed to run spark-submit: %s", errorMsg)
+		}
+		return false, fmt.Errorf("failed to run spark-submit: %v", err)
+	}
+	return true, nil
+}
+
+// buildSparkSubmitArgs builds the arguments for spark-submit by applying the
+// option funcs in order; each contributes flags only when its fields are set.
+func buildSparkSubmitArgs(app *v1beta2.SparkApplication) ([]string, error) {
+	optionFuncs := []sparkSubmitOptionFunc{
+		masterOption,
+		deployModeOption,
+		mainClassOption,
+		nameOption,
+		dependenciesOption,
+		namespaceOption,
+		imageOption,
+		pythonVersionOption,
+		memoryOverheadFactorOption,
+		submissionWaitAppCompletionOption,
+		sparkConfOption,
+		hadoopConfOption,
+		driverPodNameOption,
+		driverConfOption,
+		driverSecretOption,
+		driverEnvOption,
+		driverVolumeMountsOption,
+		executorConfOption,
+		executorSecretOption,
+		executorVolumeMountsOption,
+		executorEnvOption,
+		nodeSelectorOption,
+		dynamicAllocationOption,
+		proxyUserOption,
+		mainApplicationFileOption,
+		applicationOption,
+	}
+
+	var args []string
+	for _, optionFunc := range optionFuncs {
+		option, err := optionFunc(app)
+		if err != nil {
+			return nil, err
+		}
+		args = append(args, option...)
+	}
+
+	return args, nil
+}
+
+type sparkSubmitOptionFunc func(*v1beta2.SparkApplication) ([]string, error)
+
+func masterOption(_ *v1beta2.SparkApplication) ([]string, error) {
+	masterURL, err := util.GetMasterURL()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get master URL: %v", err)
+	}
+	args := []string{
+		"--master",
+		masterURL,
+	}
+	return args, nil
+}
+
+func deployModeOption(app *v1beta2.SparkApplication) ([]string, error) {
+	args := []string{
+		"--deploy-mode",
+		string(app.Spec.Mode),
+	}
+	return args, nil
+}
+
+func mainClassOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.MainClass == nil {
+		return nil, nil
+	}
+	args := []string{
+		"--class",
+		*app.Spec.MainClass,
+	}
+	return args, nil
+}
+
+func nameOption(app *v1beta2.SparkApplication) ([]string, error) {
+	args := []string{"--name", app.Name}
+	return args, nil
+}
+
+func namespaceOption(app *v1beta2.SparkApplication) ([]string, error) {
+	args := []string{
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesNamespace, app.Namespace),
+	}
+	return args, nil
+}
+
+func driverPodNameOption(app *v1beta2.SparkApplication) ([]string, error) {
+	args := []string{
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesDriverPodName, util.GetDriverPodName(app)),
+	}
+	return args, nil
+}
+
+func dependenciesOption(app *v1beta2.SparkApplication) ([]string, error) {
+	var args []string
+
+	if len(app.Spec.Deps.Jars) > 0 {
+		args = append(args, "--jars", strings.Join(app.Spec.Deps.Jars, ","))
+	}
+
+	if len(app.Spec.Deps.Packages) > 0 {
+		args = append(args, "--packages", strings.Join(app.Spec.Deps.Packages, ","))
+	}
+
+	if len(app.Spec.Deps.ExcludePackages) > 0 {
+		args = append(args, "--exclude-packages", strings.Join(app.Spec.Deps.ExcludePackages, ","))
+	}
+
+	if len(app.Spec.Deps.Repositories) > 0 {
+		args = append(args, "--repositories", strings.Join(app.Spec.Deps.Repositories, ","))
+	}
+
+	if len(app.Spec.Deps.PyFiles) > 0 {
+		args = append(args, "--py-files", strings.Join(app.Spec.Deps.PyFiles, ","))
+	}
+
+	if len(app.Spec.Deps.Files) > 0 {
+		args = append(args, "--files", strings.Join(app.Spec.Deps.Files, ","))
+	}
+
+	return args, nil
+}
+
+func imageOption(app *v1beta2.SparkApplication) ([]string, error) {
+	var args []string
+	if app.Spec.Image == nil || *app.Spec.Image == "" {
+		return nil, nil
+	}
+	args = append(args,
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesContainerImage, *app.Spec.Image),
+	)
+
+	// Pull policy and pull secrets are optional; return the args collected so
+	// far rather than discarding the image conf set above.
+	if app.Spec.ImagePullPolicy == nil || *app.Spec.ImagePullPolicy == "" {
+		return args, nil
+	}
+	args = append(args,
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesContainerImagePullPolicy, *app.Spec.ImagePullPolicy),
+	)
+
+	if len(app.Spec.ImagePullSecrets) == 0 {
+		return args, nil
+	}
+	secrets := strings.Join(app.Spec.ImagePullSecrets, ",")
+	args = append(args,
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesContainerImagePullSecrets, secrets),
+	)
+
+	return args, nil
+}
+
+func pythonVersionOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.PythonVersion == nil || *app.Spec.PythonVersion == "" {
+		return nil, nil
+	}
+	args := []string{
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesPysparkPythonVersion, *app.Spec.PythonVersion),
+	}
+	return args, nil
+}
+
+func memoryOverheadFactorOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.MemoryOverheadFactor == nil || *app.Spec.MemoryOverheadFactor == "" {
+		return nil, nil
+	}
+	args := []string{
+		"--conf",
+		fmt.Sprintf("%s=%s",
common.SparkKubernetesMemoryOverheadFactor, *app.Spec.MemoryOverheadFactor), + } + return args, nil +} + +func submissionWaitAppCompletionOption(_ *v1beta2.SparkApplication) ([]string, error) { + // spark-submit triggered by Spark operator should never wait for app completion + args := []string{ + "--conf", + fmt.Sprintf("%s=false", common.SparkKubernetesSubmissionWaitAppCompletion), + } + return args, nil +} + +func sparkConfOption(app *v1beta2.SparkApplication) ([]string, error) { + if app.Spec.SparkConf == nil { + return nil, nil + } + var args []string + // Add Spark configuration properties. + for key, value := range app.Spec.SparkConf { + // Configuration property for the driver pod name has already been set. + if key != common.SparkKubernetesDriverPodName { + args = append(args, "--conf", fmt.Sprintf("%s=%s", key, value)) + } + } + return args, nil +} + +func hadoopConfOption(app *v1beta2.SparkApplication) ([]string, error) { + if app.Spec.HadoopConf == nil { + return nil, nil + } + var args []string + // Add Hadoop configuration properties. + for key, value := range app.Spec.HadoopConf { + args = append(args, "--conf", fmt.Sprintf("spark.hadoop.%s=%s", key, value)) + } + return args, nil +} + +func nodeSelectorOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + for key, value := range app.Spec.NodeSelector { + property := fmt.Sprintf(common.SparkKubernetesNodeSelectorTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + return args, nil +} + +func driverConfOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + var property string + + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, common.LabelSparkAppName) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, app.Name)) + + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, common.LabelLaunchedBySparkOperator) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, "true")) + + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, common.LabelSubmissionID) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, app.Status.SubmissionID)) + + if app.Spec.Driver.Image != nil && *app.Spec.Driver.Image != "" { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverContainerImage, *app.Spec.Driver.Image)) + } else if app.Spec.Image != nil && *app.Spec.Image != "" { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverContainerImage, *app.Spec.Image)) + } else { + return nil, fmt.Errorf("driver container image is not specified") + } + + if app.Spec.Driver.Cores != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkDriverCores, *app.Spec.Driver.Cores)) + } + + if app.Spec.Driver.CoreRequest != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverRequestCores, *app.Spec.Driver.CoreRequest)) + } + + if app.Spec.Driver.CoreLimit != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverLimitCores, *app.Spec.Driver.CoreLimit)) + } + + if app.Spec.Driver.Memory != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkDriverMemory, *app.Spec.Driver.Memory)) + } + + if app.Spec.Driver.MemoryOverhead != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkDriverMemoryOverhead, *app.Spec.Driver.MemoryOverhead)) + } + + if app.Spec.Driver.ServiceAccount != nil { + args = append(args, "--conf", + 
fmt.Sprintf("%s=%s", + common.SparkKubernetesAuthenticateDriverServiceAccountName, *app.Spec.Driver.ServiceAccount), + ) + } + + if app.Spec.Driver.JavaOptions != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkDriverExtraJavaOptions, *app.Spec.Driver.JavaOptions)) + } + + if app.Spec.Driver.KubernetesMaster != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverMaster, *app.Spec.Driver.KubernetesMaster)) + } + + // Populate SparkApplication labels to driver pod + for key, value := range app.Labels { + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.Labels { + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.Annotations { + property = fmt.Sprintf(common.SparkKubernetesDriverAnnotationTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.ServiceLabels { + property = fmt.Sprintf(common.SparkKubernetesDriverServiceLabelTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.ServiceAnnotations { + property = fmt.Sprintf(common.SparkKubernetesDriverServiceAnnotationTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.EnvSecretKeyRefs { + property = fmt.Sprintf(common.SparkKubernetesDriverSecretKeyRefTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s:%s", property, value.Name, value.Key)) + } + + return args, nil +} + +// driverSecretOption returns a list of spark-submit arguments for mounting secrets to driver pod. 
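+// For secrets of type GCPServiceAccount it also exports
+// common.EnvGoogleApplicationCredentials, and for HadoopDelegationToken it
+// exports common.EnvHadoopTokenFileLocation, each pointing at the well-known
+// key file under the secret's mount path.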
+func driverSecretOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + for _, secret := range app.Spec.Driver.Secrets { + property := fmt.Sprintf(common.SparkKubernetesDriverSecretsTemplate, secret.Name) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, secret.Path)) + if secret.Type == v1beta2.SecretTypeGCPServiceAccount { + property := fmt.Sprintf(common.SparkKubernetesDriverEnvTemplate, common.EnvGoogleApplicationCredentials) + conf := fmt.Sprintf("%s=%s", property, filepath.Join(secret.Path, common.ServiceAccountJSONKeyFileName)) + args = append(args, "--conf", conf) + } else if secret.Type == v1beta2.SecretTypeHadoopDelegationToken { + property := fmt.Sprintf(common.SparkKubernetesDriverEnvTemplate, common.EnvHadoopTokenFileLocation) + conf := fmt.Sprintf("%s=%s", property, filepath.Join(secret.Path, common.HadoopDelegationTokenFileName)) + args = append(args, "--conf", conf) + } + } + return args, nil +} + +func driverVolumeMountsOption(app *v1beta2.SparkApplication) ([]string, error) { + volumes := util.GetLocalVolumes(app) + if volumes == nil { + return nil, nil + } + + volumeMounts := util.GetDriverLocalVolumeMounts(app) + if volumeMounts == nil { + return nil, nil + } + + args := []string{} + for _, volumeMount := range volumeMounts { + volumeName := volumeMount.Name + volume, ok := volumes[volumeName] + if !ok { + return args, fmt.Errorf("volume %s not found", volumeName) + } + + var volumeType string + switch { + case volume.EmptyDir != nil: + volumeType = common.VolumeTypeEmptyDir + case volume.HostPath != nil: + volumeType = common.VolumeTypeHostPath + case volume.NFS != nil: + volumeType = common.VolumeTypeNFS + case volume.PersistentVolumeClaim != nil: + volumeType = common.VolumeTypePersistentVolumeClaim + default: + return nil, fmt.Errorf("unsupported volume type") + } + + if volumeMount.MountPath != "" { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesMountPathTemplate, + volumeType, + volumeName, + ), + volumeMount.MountPath, + ), + ) + } + + if volumeMount.SubPath != "" { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesMountSubPathTemplate, + volumeType, + volumeName, + ), + volumeMount.SubPath, + ), + ) + } + + if volumeMount.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesMountReadOnlyTemplate, + volumeType, + volumeName, + ), + "true", + ), + ) + } + + switch volumeType { + case common.VolumeTypeEmptyDir: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeEmptyDir, + volume.Name, + "sizeLimit", + ), + volume.EmptyDir.SizeLimit.String(), + ), + ) + case common.VolumeTypeHostPath: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeHostPath, + volume.Name, + "path", + ), + volume.HostPath.Path, + ), + ) + + if volume.HostPath.Type != nil { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeHostPath, + volume.Name, + "type", + ), + *volume.HostPath.Type, + ), + ) + } + + case common.VolumeTypeNFS: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + 
common.VolumeTypeNFS, + volume.Name, + "path", + ), + volume.NFS.Path, + ), + ) + + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "server", + ), + volume.NFS.Server, + ), + ) + + if volume.NFS.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "readOnly", + ), + "true", + ), + ) + } + + case common.VolumeTypePersistentVolumeClaim: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypePersistentVolumeClaim, + volume.Name, + "claimName", + ), + volume.PersistentVolumeClaim.ClaimName, + ), + ) + + if volume.PersistentVolumeClaim.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypePersistentVolumeClaim, + volume.Name, + "readOnly", + ), + "true", + ), + ) + } + } + } + return args, nil +} + +// driverEnvOption returns a list of spark-submit arguments for configuring driver environment variables. +func driverEnvOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + for key, value := range app.Spec.Driver.EnvVars { + property := fmt.Sprintf(common.SparkKubernetesDriverEnvTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + return args, nil +} + +func executorConfOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + var property string + + property = fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, common.LabelSparkAppName) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, app.Name)) + + property = fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, common.LabelLaunchedBySparkOperator) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, "true")) + + property = fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, common.LabelSubmissionID) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, app.Status.SubmissionID)) + + if app.Spec.Executor.Instances != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkExecutorInstances, *app.Spec.Executor.Instances)) + } + + if app.Spec.Executor.Image != nil && *app.Spec.Executor.Image != "" { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesExecutorContainerImage, *app.Spec.Executor.Image)) + } else if app.Spec.Image != nil && *app.Spec.Image != "" { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesExecutorContainerImage, *app.Spec.Image)) + } else { + return nil, fmt.Errorf("executor container image is not specified") + } + + if app.Spec.Executor.Cores != nil { + // Property "spark.executor.cores" does not allow float values. 
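+		// Fractional CPU shares go through coreRequest
+		// (spark.kubernetes.executor.request.cores) below instead.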
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%d", common.SparkExecutorCores, *app.Spec.Executor.Cores))
+	}
+	if app.Spec.Executor.CoreRequest != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%s", common.SparkKubernetesExecutorRequestCores, *app.Spec.Executor.CoreRequest))
+	}
+	if app.Spec.Executor.CoreLimit != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%s", common.SparkKubernetesExecutorLimitCores, *app.Spec.Executor.CoreLimit))
+	}
+	if app.Spec.Executor.Memory != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%s", common.SparkExecutorMemory, *app.Spec.Executor.Memory))
+	}
+	if app.Spec.Executor.MemoryOverhead != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%s", common.SparkExecutorMemoryOverhead, *app.Spec.Executor.MemoryOverhead))
+	}
+
+	if app.Spec.Executor.ServiceAccount != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%s", common.SparkKubernetesAuthenticateExecutorServiceAccountName, *app.Spec.Executor.ServiceAccount))
+	}
+
+	if app.Spec.Executor.DeleteOnTermination != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%t", common.SparkKubernetesExecutorDeleteOnTermination, *app.Spec.Executor.DeleteOnTermination))
+	}
+
+	// Populate SparkApplication labels to executor pod
+	for key, value := range app.Labels {
+		property := fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, key)
+		args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value))
+	}
+	for key, value := range app.Spec.Executor.Labels {
+		property := fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, key)
+		args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value))
+	}
+
+	for key, value := range app.Spec.Executor.Annotations {
+		property := fmt.Sprintf(common.SparkKubernetesExecutorAnnotationTemplate, key)
+		args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value))
+	}
+
+	for key, value := range app.Spec.Executor.EnvSecretKeyRefs {
+		property := fmt.Sprintf(common.SparkKubernetesExecutorSecretKeyRefTemplate, key)
+		args = append(args, "--conf", fmt.Sprintf("%s=%s:%s", property, value.Name, value.Key))
+	}
+
+	if app.Spec.Executor.JavaOptions != nil {
+		args = append(args, "--conf", fmt.Sprintf("%s=%s", common.SparkExecutorExtraJavaOptions, *app.Spec.Executor.JavaOptions))
+	}
+
+	return args, nil
+}
+
+func executorSecretOption(app *v1beta2.SparkApplication) ([]string, error) {
+	var args []string
+	for _, secret := range app.Spec.Executor.Secrets {
+		property := fmt.Sprintf(common.SparkKubernetesExecutorSecretsTemplate, secret.Name)
+		args = append(args, "--conf", fmt.Sprintf("%s=%s", property, secret.Path))
+		// Use the executor env template so the credential variables land on
+		// the executor pods that mount the secret, not on the driver.
+		switch secret.Type {
+		case v1beta2.SecretTypeGCPServiceAccount:
+			property := fmt.Sprintf(common.SparkExecutorEnvTemplate, common.EnvGoogleApplicationCredentials)
+			args = append(args, "--conf", fmt.Sprintf("%s=%s", property,
+				filepath.Join(secret.Path, common.ServiceAccountJSONKeyFileName)))
+		case v1beta2.SecretTypeHadoopDelegationToken:
+			property := fmt.Sprintf(common.SparkExecutorEnvTemplate, common.EnvHadoopTokenFileLocation)
+			args = append(args, "--conf", fmt.Sprintf("%s=%s", property,
+				filepath.Join(secret.Path, common.HadoopDelegationTokenFileName)))
+		}
+	}
+	return args, nil
+}
+
+func executorVolumeMountsOption(app *v1beta2.SparkApplication) ([]string, error) {
+	volumes := util.GetLocalVolumes(app)
+	if volumes == nil {
+		return nil, nil
+	}
+
+	volumeMounts := util.GetExecutorLocalVolumeMounts(app)
+	if volumeMounts == nil {
+		return nil, nil
+	}
+
+	// Map each executor volume mount onto spark.kubernetes.executor.volumes.*
+	// options, mirroring the driver handling above.
+	args := []string{}
+ 
for _, volumeMount := range volumeMounts { + volumeName := volumeMount.Name + volume, ok := volumes[volumeName] + if !ok { + return args, fmt.Errorf("volume %s not found", volumeName) + } + + var volumeType string + switch { + case volume.EmptyDir != nil: + volumeType = common.VolumeTypeEmptyDir + case volume.HostPath != nil: + volumeType = common.VolumeTypeHostPath + case volume.NFS != nil: + volumeType = common.VolumeTypeNFS + case volume.PersistentVolumeClaim != nil: + volumeType = common.VolumeTypePersistentVolumeClaim + default: + return nil, fmt.Errorf("unsupported volume type") + } + + if volumeMount.MountPath != "" { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesMountPathTemplate, + volumeType, + volumeName, + ), + volumeMount.MountPath, + ), + ) + } + + if volumeMount.SubPath != "" { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesMountSubPathTemplate, + volumeType, + volumeName, + ), + volumeMount.SubPath, + ), + ) + } + + if volumeMount.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesMountReadOnlyTemplate, + volumeType, + volumeName, + ), + "true", + ), + ) + } + switch volumeType { + case common.VolumeTypeEmptyDir: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeEmptyDir, + volume.Name, + "sizeLimit", + ), + volume.EmptyDir.SizeLimit.String(), + ), + ) + case common.VolumeTypeHostPath: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeHostPath, + volume.Name, + "path", + ), + volume.HostPath.Path, + ), + ) + + if volume.HostPath.Type != nil { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeHostPath, + volume.Name, + "type", + ), + *volume.HostPath.Type, + ), + ) + } + + case common.VolumeTypeNFS: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "path", + ), + volume.NFS.Path, + ), + ) + + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "server", + ), + volume.NFS.Server, + ), + ) + + if volume.NFS.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "readOnly", + ), + "true", + ), + ) + } + + case common.VolumeTypePersistentVolumeClaim: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypePersistentVolumeClaim, + volume.Name, + "claimName", + ), + volume.PersistentVolumeClaim.ClaimName, + ), + ) + + if volume.PersistentVolumeClaim.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypePersistentVolumeClaim, + volume.Name, + "readOnly", + ), + "true", + ), + ) + } + } + } + return args, nil +} + +func executorEnvOption(app *v1beta2.SparkApplication) ([]string, error) { + var 
args []string + for key, value := range app.Spec.Executor.EnvVars { + property := fmt.Sprintf(common.SparkExecutorEnvTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + return args, nil +} + +func dynamicAllocationOption(app *v1beta2.SparkApplication) ([]string, error) { + if app.Spec.DynamicAllocation == nil || !app.Spec.DynamicAllocation.Enabled { + return nil, nil + } + + var args []string + dynamicAllocation := app.Spec.DynamicAllocation + args = append(args, "--conf", + fmt.Sprintf("%s=true", common.SparkDynamicAllocationEnabled)) + + // Turn on shuffle tracking if dynamic allocation is enabled. + args = append(args, "--conf", + fmt.Sprintf("%s=true", common.SparkDynamicAllocationShuffleTrackingEnabled)) + + if dynamicAllocation.InitialExecutors != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkDynamicAllocationInitialExecutors, *dynamicAllocation.InitialExecutors)) + } + if dynamicAllocation.MinExecutors != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkDynamicAllocationMinExecutors, *dynamicAllocation.MinExecutors)) + } + if dynamicAllocation.MaxExecutors != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkDynamicAllocationMaxExecutors, *dynamicAllocation.MaxExecutors)) + } + if dynamicAllocation.ShuffleTrackingTimeout != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkDynamicAllocationShuffleTrackingTimeout, *dynamicAllocation.ShuffleTrackingTimeout)) + } + + return args, nil +} + +func proxyUserOption(app *v1beta2.SparkApplication) ([]string, error) { + if app.Spec.ProxyUser == nil || *app.Spec.ProxyUser == "" { + return nil, nil + } + args := []string{ + "--proxy-user", + *app.Spec.ProxyUser, + } + return args, nil +} + +func mainApplicationFileOption(app *v1beta2.SparkApplication) ([]string, error) { + if app.Spec.MainApplicationFile == nil { + return nil, nil + } + args := []string{*app.Spec.MainApplicationFile} + return args, nil +} + +// applicationOption returns the application arguments. +func applicationOption(app *v1beta2.SparkApplication) ([]string, error) { + return app.Spec.Arguments, nil +} diff --git a/internal/controller/sparkapplication/submission_test.go b/internal/controller/sparkapplication/submission_test.go new file mode 100644 index 000000000..878438f84 --- /dev/null +++ b/internal/controller/sparkapplication/submission_test.go @@ -0,0 +1,696 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sparkapplication + +// import ( +// "fmt" +// "os" +// "reflect" +// "sort" +// "strconv" +// "testing" + +// "github.com/google/uuid" +// "github.com/stretchr/testify/assert" +// corev1 "k8s.io/api/core/v1" +// "k8s.io/apimachinery/pkg/api/resource" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// "github.com/kubeflow/spark-operator/api/v1beta2" +// "github.com/kubeflow/spark-operator/pkg/common" +// "github.com/kubeflow/spark-operator/pkg/util" +// ) + +// const ( +// VolumeMountPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.mount.path=%s" +// VolumeMountOptionPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.options.%s=%s" +// SparkDriverLabelAnnotationTemplate = "spark.kubernetes.driver.label.sparkoperator.k8s.io/%s=%s" +// SparkDriverLabelTemplate = "spark.kubernetes.driver.label.%s=%s" +// SparkDriverServiceLabelTemplate = "spark.kubernetes.driver.service.label.%s=%s" +// SparkExecutorLabelAnnotationTemplate = "spark.kubernetes.executor.label.sparkoperator.k8s.io/%s=%s" +// SparkExecutorLabelTemplate = "spark.kubernetes.executor.label.%s=%s" +// ) + +// func TestAddLocalDir_HostPath(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 2, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// } + +// func TestAddLocalDir_PVC(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ +// ClaimName: "/tmp/mnt-1", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 2, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "persistentVolumeClaim", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, 
"driver", "persistentVolumeClaim", volumes[0].Name, "claimName", volumes[0].PersistentVolumeClaim.ClaimName), localDirOptions[1]) +// } + +// func TestAddLocalDir_MixedVolumes(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt-1", +// }, +// }, +// }, +// { +// Name: "log-dir", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/var/log/spark", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// { +// Name: "log-dir", +// MountPath: "/var/log/spark", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 1, len(app.Spec.Volumes)) +// assert.Equal(t, 1, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 2, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// } + +// func TestAddLocalDir_MultipleScratchVolumes(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt-1", +// }, +// }, +// }, +// { +// Name: "spark-local-dir-2", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt-2", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// { +// Name: "spark-local-dir-2", +// MountPath: "/tmp/mnt-2", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 4, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[1].Name, volumeMounts[1].MountPath), localDirOptions[2]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[1].Name, "path", volumes[1].HostPath.Path), localDirOptions[3]) +// } + +// func TestAddLocalDir_Executor(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: 
"spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) +// assert.Equal(t, 2, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// } + +// func TestAddLocalDir_Driver_Executor(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt", +// }, +// }, +// }, +// { +// Name: "test-volume", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/test", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// { +// Name: "test-volume", +// MountPath: "/tmp/test", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 1, len(app.Spec.Volumes)) +// assert.Equal(t, 1, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 1, len(app.Spec.Executor.VolumeMounts)) +// assert.Equal(t, 4, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[3]) +// } + +// func TestAddEmptyDir_Driver_Executor_WithSizeLimit(t *testing.T) { +// sizeLimit := resource.MustParse("5Gi") +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// EmptyDir: &corev1.EmptyDirVolumeSource{ +// SizeLimit: &sizeLimit, +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// 
MountPath: "/tmp/mnt-1", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) +// assert.Equal(t, 4, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[1]) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[3]) +// } + +// func TestPopulateLabels_Driver_Executor(t *testing.T) { +// const ( +// AppLabelKey = "app-label-key" +// AppLabelValue = "app-label-value" +// DriverLabelKey = "driver-label-key" +// DriverLabelValue = "driver-label-key" +// DriverServiceLabelKey = "driver-svc-label-key" +// DriverServiceLabelValue = "driver-svc-label-value" +// ExecutorLabelKey = "executor-label-key" +// ExecutorLabelValue = "executor-label-key" +// ) + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// Labels: map[string]string{AppLabelKey: AppLabelValue}, +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// ServiceLabels: map[string]string{DriverServiceLabelKey: DriverServiceLabelValue}, +// SparkPodSpec: v1beta2.SparkPodSpec{ +// Labels: map[string]string{DriverLabelKey: DriverLabelValue}, +// }, +// }, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// Labels: map[string]string{ExecutorLabelKey: ExecutorLabelValue}, +// }, +// }, +// }, +// } + +// submissionID := uuid.New().String() +// driverOptions, err := addDriverConfOptions(app, submissionID) +// if err != nil { +// t.Fatal(err) +// } +// assert.Equal(t, 6, len(driverOptions)) +// sort.Strings(driverOptions) +// expectedDriverLabels := []string{ +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "app-name", "spark-test"), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), +// fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, AppLabelValue), +// fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), +// fmt.Sprintf(SparkDriverServiceLabelTemplate, DriverServiceLabelKey, DriverServiceLabelValue), +// } +// sort.Strings(expectedDriverLabels) + +// if !reflect.DeepEqual(expectedDriverLabels, driverOptions) { +// t.Errorf("Executor labels: wanted %+q got %+q", expectedDriverLabels, driverOptions) +// } + +// executorOptions, err := 
addExecutorConfOptions(app, submissionID) +// sort.Strings(executorOptions) +// if err != nil { +// t.Fatal(err) +// } +// assert.Equal(t, 5, len(executorOptions)) +// expectedExecutorLabels := []string{ +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "app-name", "spark-test"), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "submission-id", submissionID), +// fmt.Sprintf(SparkExecutorLabelTemplate, AppLabelKey, AppLabelValue), +// fmt.Sprintf(SparkExecutorLabelTemplate, ExecutorLabelKey, ExecutorLabelValue), +// } +// sort.Strings(expectedExecutorLabels) + +// if !reflect.DeepEqual(expectedExecutorLabels, executorOptions) { +// t.Errorf("Executor labels: wanted %+q got %+q", expectedExecutorLabels, executorOptions) +// } +// } + +// func TestPopulateLabelsOverride_Driver_Executor(t *testing.T) { +// const ( +// AppLabelKey = "app-label-key" +// AppLabelValue = "app-label-value" +// DriverLabelKey = "driver-label-key" +// DriverLabelValue = "driver-label-key" +// DriverAppLabelOverride = "driver-app-label-override" +// ExecutorLabelKey = "executor-label-key" +// ExecutorLabelValue = "executor-label-key" +// ExecutorAppLabelOverride = "executor-app-label-override" +// ) + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// Labels: map[string]string{AppLabelKey: AppLabelValue}, +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// Labels: map[string]string{DriverLabelKey: DriverLabelValue, AppLabelKey: DriverAppLabelOverride}, +// }, +// }, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// Labels: map[string]string{ExecutorLabelKey: ExecutorLabelValue, AppLabelKey: ExecutorAppLabelOverride}, +// }, +// }, +// }, +// } + +// submissionID := uuid.New().String() +// driverOptions, err := addDriverConfOptions(app, submissionID) +// if err != nil { +// t.Fatal(err) +// } +// sort.Strings(driverOptions) +// assert.Equal(t, 5, len(driverOptions)) +// expectedDriverLabels := []string{ +// fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, DriverAppLabelOverride), +// fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "app-name", "spark-test"), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), +// } +// sort.Strings(expectedDriverLabels) + +// if !reflect.DeepEqual(expectedDriverLabels, driverOptions) { +// t.Errorf("Executor labels: wanted %+q got %+q", expectedDriverLabels, driverOptions) +// } + +// executorOptions, err := addExecutorConfOptions(app, submissionID) +// if err != nil { +// t.Fatal(err) +// } +// sort.Strings(executorOptions) +// assert.Equal(t, 5, len(executorOptions)) +// expectedExecutorLabels := []string{ +// fmt.Sprintf(SparkExecutorLabelTemplate, AppLabelKey, ExecutorAppLabelOverride), +// fmt.Sprintf(SparkExecutorLabelTemplate, ExecutorLabelKey, ExecutorLabelValue), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "app-name", "spark-test"), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "submission-id", submissionID), +// } +// 
sort.Strings(expectedExecutorLabels) + +// if !reflect.DeepEqual(expectedExecutorLabels, executorOptions) { +// t.Errorf("Executor labels: wanted %+q got %+q", expectedExecutorLabels, executorOptions) +// } +// } + +// func TestDynamicAllocationOptions(t *testing.T) { +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{}, +// } +// options := addDynamicAllocationConfOptions(app) +// assert.Equal(t, 0, len(options)) + +// app = &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DynamicAllocation: &v1beta2.DynamicAllocation{ +// Enabled: true, +// InitialExecutors: util.Int32Ptr(2), +// MinExecutors: util.Int32Ptr(0), +// MaxExecutors: util.Int32Ptr(10), +// ShuffleTrackingTimeout: util.Int64Ptr(6000000), +// }, +// }, +// } + +// options = addDynamicAllocationConfOptions(app) +// assert.Equal(t, 6, len(options)) +// assert.Equal(t, fmt.Sprintf("%s=true", common.SparkDynamicAllocationEnabled), options[0]) +// assert.Equal(t, fmt.Sprintf("%s=true", common.SparkDynamicAllocationShuffleTrackingEnabled), options[1]) +// assert.Equal(t, fmt.Sprintf("%s=2", common.SparkDynamicAllocationInitialExecutors), options[2]) +// assert.Equal(t, fmt.Sprintf("%s=0", common.SparkDynamicAllocationMinExecutors), options[3]) +// assert.Equal(t, fmt.Sprintf("%s=10", common.SparkDynamicAllocationMaxExecutors), options[4]) +// assert.Equal(t, fmt.Sprintf("%s=6000000", common.SparkDynamicAllocationShuffleTrackingTimeout), options[5]) +// } + +// func TestProxyUserArg(t *testing.T) { +// const ( +// host = "localhost" +// port = "6443" +// ) + +// if err := os.Setenv(common.EnvKubernetesServiceHost, host); err != nil { +// t.Fatal(err) +// } +// if err := os.Setenv(common.EnvKubernetesServicePort, port); err != nil { +// t.Fatal(err) +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Mode: v1beta2.ClusterMode, +// ProxyUser: util.StringPtr("foo"), +// }, +// } + +// submissionID := uuid.New().String() +// driverPodName := app.GetDriverPodName() +// args, err := buildSubmissionCommandArgs(app, driverPodName, submissionID) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, "--master", args[0]) +// assert.Equal(t, fmt.Sprintf("k8s://https://%s:%s", host, port), args[1]) +// assert.Equal(t, "--deploy-mode", args[2]) +// assert.Equal(t, string(v1beta2.ClusterMode), args[3]) +// assert.Equal(t, "--proxy-user", args[4]) +// assert.Equal(t, "foo", args[5]) +// } + +// func Test_getMasterURL(t *testing.T) { +// setEnv := func(host string, port string) { +// if err := os.Setenv(common.EnvKubernetesServiceHost, host); err != nil { +// t.Fatal(err) +// } +// if err := os.Setenv(common.EnvKubernetesServicePort, port); err != nil { +// t.Fatal(err) +// } +// } + +// tests := []struct { +// name string +// host string +// port string +// want string +// wantErr assert.ErrorAssertionFunc +// }{ +// { +// name: "should return a valid master url when IPv4 address is used", +// host: "localhost", +// port: "6443", +// want: "k8s://https://localhost:6443", +// wantErr: assert.NoError, +// }, +// { +// name: "should return a valid master url when IPv6 address is used", +// host: "::1", +// port: "6443", +// want: "k8s://https://[::1]:6443", +// wantErr: assert.NoError, +// 
},
+// 		{
+// 			name:    "should throw an error when the host is empty",
+// 			host:    "",
+// 			port:    "6443",
+// 			want:    "",
+// 			wantErr: assert.Error,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			setEnv(tt.host, tt.port)
+// 			got, err := getMasterURL()
+// 			if !tt.wantErr(t, err, "getMasterURL()") {
+// 				return
+// 			}
+// 			assert.Equalf(t, tt.want, got, "getMasterURL()")
+// 		})
+// 	}
+// }
diff --git a/internal/controller/sparkapplication/suite_test.go b/internal/controller/sparkapplication/suite_test.go
new file mode 100644
index 000000000..02ce4c260
--- /dev/null
+++ b/internal/controller/sparkapplication/suite_test.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sparkapplication_test
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/kubeflow/spark-operator/api/v1beta1"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+
+func TestControllers(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Controller Suite")
+}
+
+var _ = BeforeSuite(func() {
+	log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+
+		// BinaryAssetsDirectory is only required if you want to run the tests directly,
+		// without calling the Makefile target "test". If it is not set, the default path
+		// defined in controller-runtime is used, which is /usr/local/kubebuilder/.
+		// Note that you must have the required binaries set up under the bin directory
+		// to run the tests directly. When we run "make test", they are set up and used
+		// automatically.
+		BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s",
+			fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)),
+	}
+
+	var err error
+	// cfg is defined in this file globally.
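+	// testEnv.Start() below launches a local control plane (etcd and kube-apiserver
+	// binaries from BinaryAssetsDirectory) and returns a rest.Config pointing at it,
+	// so the suite runs without a real cluster.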
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = v1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/controller/sparkapplication/validator.go b/internal/controller/sparkapplication/validator.go new file mode 100644 index 000000000..0cb985078 --- /dev/null +++ b/internal/controller/sparkapplication/validator.go @@ -0,0 +1,44 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +type Validator struct{} + +// Validator implements admission.CustomValidator. +var _ admission.CustomValidator = &Validator{} + +// ValidateCreate implements admission.CustomValidator. +func (s *Validator) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +// ValidateDelete implements admission.CustomValidator. +func (s *Validator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +// ValidateUpdate implements admission.CustomValidator. +func (s *Validator) ValidateUpdate(_ context.Context, _ runtime.Object, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} diff --git a/internal/controller/sparkapplication/web_ui.go b/internal/controller/sparkapplication/web_ui.go new file mode 100644 index 000000000..284b4b6f4 --- /dev/null +++ b/internal/controller/sparkapplication/web_ui.go @@ -0,0 +1,92 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package sparkapplication
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+func (r *Reconciler) createWebUIService(app *v1beta2.SparkApplication) (*SparkService, error) {
+	portName := getWebUIServicePortName(app)
+	port, err := getWebUIServicePort(app)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Spark web UI service port: %v", err)
+	}
+
+	targetPort, err := getWebUITargetPort(app)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Spark web UI target port: %v", err)
+	}
+
+	serviceName := util.GetDefaultUIServiceName(app)
+	serviceType := util.GetWebUIServiceType(app)
+	serviceLabels := util.GetWebUIServiceLabels(app)
+	serviceAnnotations := util.GetWebUIServiceAnnotations(app)
+
+	return r.createDriverIngressService(app, portName, port, targetPort, serviceName, serviceType, serviceAnnotations, serviceLabels)
+}
+
+func (r *Reconciler) createWebUIIngress(app *v1beta2.SparkApplication, service SparkService, ingressURL *url.URL, ingressClassName string) (*SparkIngress, error) {
+	ingressName := util.GetDefaultUIIngressName(app)
+	if util.IngressCapabilities.Has("networking.k8s.io/v1") {
+		return r.createDriverIngressV1(app, service, ingressName, ingressURL, ingressClassName)
+	}
+	return r.createDriverIngressLegacy(app, service, ingressName, ingressURL)
+}
+
+func getWebUIServicePortName(app *v1beta2.SparkApplication) string {
+	if app.Spec.SparkUIOptions == nil {
+		return common.DefaultSparkWebUIPortName
+	}
+	portName := app.Spec.SparkUIOptions.ServicePortName
+	if portName != nil {
+		return *portName
+	}
+	return common.DefaultSparkWebUIPortName
+}
+
+func getWebUIServicePort(app *v1beta2.SparkApplication) (int32, error) {
+	if app.Spec.SparkUIOptions == nil {
+		return getWebUITargetPort(app)
+	}
+	port := app.Spec.SparkUIOptions.ServicePort
+	if port != nil {
+		return *port, nil
+	}
+	return common.DefaultSparkWebUIPort, nil
+}
+
+// getWebUITargetPort attempts to get the Spark web UI port from configuration property spark.ui.port
+// in Spec.SparkConf if it is present; otherwise the default port is returned. The default port is
+// also returned if the configured value cannot be parsed as an integer.
+// Note that we don't attempt to get the port from Spec.SparkConfigMap.
+func getWebUITargetPort(app *v1beta2.SparkApplication) (int32, error) {
+	portStr, ok := app.Spec.SparkConf[common.SparkUIPortKey]
+	if !ok {
+		return common.DefaultSparkWebUIPort, nil
+	}
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return common.DefaultSparkWebUIPort, nil
+	}
+	return int32(port), nil
+}
diff --git a/internal/controller/sparkapplication/web_ui_test.go b/internal/controller/sparkapplication/web_ui_test.go
new file mode 100644
index 000000000..a2d1566f3
--- /dev/null
+++ b/internal/controller/sparkapplication/web_ui_test.go
@@ -0,0 +1,655 @@
+/*
+Copyright 2017 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package sparkapplication + +// func TestCreateSparkUIService(t *testing.T) { +// type testcase struct { +// name string +// app *v1beta2.SparkApplication +// expectedService SparkService +// expectedSelector map[string]string +// expectError bool +// } +// testFn := func(test testcase, t *testing.T) { +// fakeClient := fake.NewSimpleClientset() +// util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} +// sparkService, err := createSparkUIService(test.app, fakeClient) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// if sparkService.serviceName != test.expectedService.serviceName { +// t.Errorf("%s: for service name wanted %s got %s", test.name, test.expectedService.serviceName, sparkService.serviceName) +// } +// service, err := fakeClient.CoreV1(). +// Services(test.app.Namespace). +// Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// if service.Labels[common.SparkAppNameLabel] != test.app.Name { +// t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) +// } +// if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { +// t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) +// } +// if service.Spec.Type != test.expectedService.serviceType { +// t.Errorf("%s: for service type wanted %s got %s", test.name, test.expectedService.serviceType, service.Spec.Type) +// } +// if len(service.Spec.Ports) != 1 { +// t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) +// } +// port := service.Spec.Ports[0] +// if port.Port != test.expectedService.servicePort { +// t.Errorf("%s: unexpected port wanted %d got %d", test.name, test.expectedService.servicePort, port.Port) +// } +// if port.Name != test.expectedService.servicePortName { +// t.Errorf("%s: unexpected port name wanted %s got %s", test.name, test.expectedService.servicePortName, port.Name) +// } +// serviceAnnotations := service.ObjectMeta.Annotations +// if !reflect.DeepEqual(serviceAnnotations, test.expectedService.serviceAnnotations) { +// t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, test.expectedService.serviceAnnotations, serviceAnnotations) +// } +// serviceLabels := service.ObjectMeta.Labels +// if !reflect.DeepEqual(serviceLabels, test.expectedService.serviceLabels) { +// t.Errorf("%s: unexpected labels wanted %s got %s", test.name, test.expectedService.serviceLabels, serviceLabels) +// } +// } +// defaultPort := defaultSparkWebUIPort +// defaultPortName := defaultSparkWebUIPortName +// app1 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo1", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkConf: map[string]string{ +// sparkUIPortConfigurationKey: "4041", +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// ExecutionAttempts: 1, +// }, +// } +// app2 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo2", +// Namespace: "default", +// UID: "foo-123", +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-2", +// ExecutionAttempts: 2, +// }, +// } +// app3 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo3", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// 
SparkConf: map[string]string{ +// sparkUIPortConfigurationKey: "4041x", +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-3", +// }, +// } +// var appPort int32 = 80 +// app4 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo4", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// IngressAnnotations: nil, +// IngressTLS: nil, +// }, +// SparkConf: map[string]string{ +// sparkUIPortConfigurationKey: "4041", +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-3", +// }, +// } +// var serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort +// app5 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo5", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServiceType: &serviceTypeNodePort, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-2", +// ExecutionAttempts: 2, +// }, +// } +// appPortName := "http-spark-test" +// app6 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo6", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// ServicePortName: &appPortName, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-6", +// }, +// } +// app7 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo7", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServiceAnnotations: map[string]string{ +// "key": "value", +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-7", +// ExecutionAttempts: 1, +// }, +// } +// app8 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo8", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServiceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo8", +// "key": "value", +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-8", +// ExecutionAttempts: 1, +// }, +// } +// testcases := []testcase{ +// { +// name: "service with custom serviceport and serviceport and target port are same", +// app: app1, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app1.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: 4041, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo1", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo1", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with default port", +// app: app2, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app2.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: int32(defaultPort), +// serviceLabels: map[string]string{ +// 
"sparkoperator.k8s.io/app-name": "foo2", +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo2", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom serviceport and serviceport and target port are different", +// app: app4, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app4.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: 80, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo4", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo4", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom servicetype", +// app: app5, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app5.GetName()), +// serviceType: apiv1.ServiceTypeNodePort, +// servicePortName: defaultPortName, +// servicePort: int32(defaultPort), +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo5", +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo5", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom serviceportname", +// app: app6, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app6.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: "http-spark-test", +// servicePort: int32(80), +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo6", +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo6", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with annotation", +// app: app7, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app7.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: defaultPort, +// serviceAnnotations: map[string]string{ +// "key": "value", +// }, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo7", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo7", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom labels", +// app: app8, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app8.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: defaultPort, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo8", +// "key": "value", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo8", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with bad port configurations", +// app: app3, +// expectError: true, +// }, +// } +// for _, test := range testcases { +// testFn(test, t) +// } +// } + +// func TestCreateSparkUIIngress(t *testing.T) { +// type testcase 
struct { +// name string +// app *v1beta2.SparkApplication +// expectedIngress SparkIngress +// expectError bool +// } + +// testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { +// fakeClient := fake.NewSimpleClientset() +// sparkService, err := createSparkUIService(test.app, fakeClient) +// if err != nil { +// t.Fatal(err) +// } +// ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) +// if err != nil { +// t.Fatal(err) +// } +// sparkIngress, err := createSparkUIIngress(test.app, *sparkService, ingressURL, ingressClassName, fakeClient) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// if sparkIngress.ingressName != test.expectedIngress.ingressName { +// t.Errorf("Ingress name wanted %s got %s", test.expectedIngress.ingressName, sparkIngress.ingressName) +// } +// if sparkIngress.ingressURL.String() != test.expectedIngress.ingressURL.String() { +// t.Errorf("Ingress URL wanted %s got %s", test.expectedIngress.ingressURL, sparkIngress.ingressURL) +// } +// ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). +// Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) +// if err != nil { +// t.Fatal(err) +// } +// if len(ingress.Annotations) != 0 { +// for key, value := range ingress.Annotations { +// if test.expectedIngress.annotations[key] != ingress.Annotations[key] { +// t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) +// } +// } +// } +// if len(ingress.Spec.TLS) != 0 { +// for _, ingressTls := range ingress.Spec.TLS { +// if ingressTls.Hosts[0] != test.expectedIngress.ingressTLS[0].Hosts[0] { +// t.Errorf("Expected ingressTls host: %s but found : %s", test.expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) +// } +// if ingressTls.SecretName != test.expectedIngress.ingressTLS[0].SecretName { +// t.Errorf("Expected ingressTls secretName: %s but found : %s", test.expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) +// } +// } +// } +// if ingress.Labels[common.SparkAppNameLabel] != test.app.Name { +// t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) +// } + +// if len(ingress.Spec.Rules) != 1 { +// t.Errorf("No Ingress rules found.") +// } +// ingressRule := ingress.Spec.Rules[0] +// // If we have a path, then the ingress adds capture groups +// if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { +// test.expectedIngress.ingressURL.Path = test.expectedIngress.ingressURL.Path + "(/|$)(.*)" +// } +// if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path { + +// t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path) +// } + +// if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { +// t.Errorf("No Ingress paths found.") +// } +// ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] +// if ingressPath.Backend.Service.Name != sparkService.serviceName { +// t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) +// } +// if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { +// t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) +// } +// 
if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { +// t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) +// } +// } + +// var appPort int32 = 80 +// app1 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app2 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app3 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// IngressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app4 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// }, +// IngressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: ""}, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } + +// testcases := []testcase{ +// { +// name: "simple ingress object", +// app: app1, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), +// ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), +// }, +// expectError: false, +// }, +// { +// name: "ingress with annotations and without tls configuration", +// app: app2, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app2.GetName()), +// ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with annotations and tls configuration", +// app: app3, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app3.GetName()), +// ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), +// 
annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// ingressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with incomplete list of annotations", +// app: app4, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app4.GetName()), +// ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// ingressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: ""}, +// }, +// }, +// expectError: true, +// }, +// } + +// for _, test := range testcases { +// testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") +// } + +// testcases = []testcase{ +// { +// name: "simple ingress object with ingress URL Format with path", +// app: app1, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), +// ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), +// annotations: map[string]string{ +// "nginx.ingress.kubernetes.io/rewrite-target": "/$2", +// }, +// }, +// expectError: false, +// }, +// } + +// for _, test := range testcases { +// testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") +// } + +// testcases = []testcase{ +// { +// name: "simple ingress object with ingressClassName set", +// app: app1, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), +// ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), +// ingressClassName: "nginx", +// }, +// expectError: false, +// }, +// } +// for _, test := range testcases { +// testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") +// } +// } + +// func parseURLAndAssertError(testURL string, t *testing.T) *url.URL { +// fallbackURL, _ := url.Parse("http://example.com") +// parsedURL, err := url.Parse(testURL) +// if err != nil { +// t.Errorf("failed to parse the url: %s", testURL) +// return fallbackURL +// } +// if parsedURL.Scheme == "" { +// //url does not contain any scheme, adding http:// so url.Parse can function correctly +// parsedURL, err = url.Parse("http://" + testURL) +// if err != nil { +// t.Errorf("failed to parse the url: %s", testURL) +// return fallbackURL +// } +// } +// return parsedURL +// } diff --git a/internal/controller/validatingwebhookconfiguration/controller.go b/internal/controller/validatingwebhookconfiguration/controller.go new file mode 100644 index 000000000..7c641da06 --- /dev/null +++ b/internal/controller/validatingwebhookconfiguration/controller.go @@ -0,0 +1,100 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package validatingwebhookconfiguration
+
+import (
+	"context"
+	"fmt"
+
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/kubeflow/spark-operator/pkg/certificate"
+)
+
+var (
+	logger = ctrl.Log.WithName("")
+)
+
+// Reconciler reconciles a ValidatingWebhookConfiguration object.
+type Reconciler struct {
+	client       client.Client
+	certProvider *certificate.Provider
+	name         string
+}
+
+// Reconciler implements the reconcile.Reconciler interface.
+var _ reconcile.Reconciler = &Reconciler{}
+
+// NewReconciler creates a new Reconciler instance.
+func NewReconciler(client client.Client, certProvider *certificate.Provider, name string) *Reconciler {
+	return &Reconciler{
+		client:       client,
+		certProvider: certProvider,
+		name:         name,
+	}
+}
+
+func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		Named("validating-webhook-configuration-controller").
+		Watches(
+			&admissionregistrationv1.ValidatingWebhookConfiguration{},
+			NewEventHandler(),
+			builder.WithPredicates(
+				NewEventFilter(r.name),
+			),
+		).
+		WithOptions(options).
+		Complete(r)
+}
+
+// Reconcile implements reconcile.Reconciler.
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger.Info("Updating CA bundle of ValidatingWebhookConfiguration", "name", req.Name)
+	if err := r.updateValidatingWebhookConfiguration(ctx, req.NamespacedName); err != nil {
+		return ctrl.Result{Requeue: true}, nil
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) updateValidatingWebhookConfiguration(ctx context.Context, key types.NamespacedName) error {
+	webhook := &admissionregistrationv1.ValidatingWebhookConfiguration{}
+	if err := r.client.Get(ctx, key, webhook); err != nil {
+		return fmt.Errorf("failed to get validating webhook configuration %v: %v", key, err)
+	}
+
+	caBundle, err := r.certProvider.CACert()
+	if err != nil {
+		return fmt.Errorf("failed to get CA certificate: %v", err)
+	}
+
+	newWebhook := webhook.DeepCopy()
+	for i := range newWebhook.Webhooks {
+		newWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
+	}
+	if err := r.client.Update(ctx, newWebhook); err != nil {
+		return fmt.Errorf("failed to update validating webhook configuration %v: %v", key, err)
+	}
+
+	return nil
+}
diff --git a/internal/controller/validatingwebhookconfiguration/event_filter.go b/internal/controller/validatingwebhookconfiguration/event_filter.go
new file mode 100644
index 000000000..d78076e4b
--- /dev/null
+++ b/internal/controller/validatingwebhookconfiguration/event_filter.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validatingwebhookconfiguration
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+// EventFilter filters events for the ValidatingWebhookConfiguration.
+type EventFilter struct {
+	name string
+}
+
+func NewEventFilter(name string) *EventFilter {
+	return &EventFilter{
+		name: name,
+	}
+}
+
+// EventFilter implements the predicate.Predicate interface.
+var _ predicate.Predicate = &EventFilter{}
+
+// Create implements predicate.Predicate.
+func (f *EventFilter) Create(e event.CreateEvent) bool {
+	return e.Object.GetName() == f.name
+}
+
+// Update implements predicate.Predicate.
+func (f *EventFilter) Update(e event.UpdateEvent) bool {
+	return e.ObjectOld.GetName() == f.name
+}
+
+// Delete implements predicate.Predicate.
+func (f *EventFilter) Delete(event.DeleteEvent) bool {
+	return false
+}
+
+// Generic implements predicate.Predicate.
+func (f *EventFilter) Generic(event.GenericEvent) bool {
+	return false
+}
diff --git a/internal/controller/validatingwebhookconfiguration/event_handler.go b/internal/controller/validatingwebhookconfiguration/event_handler.go
new file mode 100644
index 000000000..a35a47307
--- /dev/null
+++ b/internal/controller/validatingwebhookconfiguration/event_handler.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validatingwebhookconfiguration
+
+import (
+	"context"
+
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/workqueue"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+)
+
+// EventHandler handles ValidatingWebhookConfiguration events.
+type EventHandler struct{}
+
+var _ handler.EventHandler = &EventHandler{}
+
+// NewEventHandler creates a new EventHandler instance.
+func NewEventHandler() *EventHandler {
+	return &EventHandler{}
+}
+
+// Create implements handler.EventHandler.
+func (h *EventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) {
+	vwc, ok := event.Object.(*admissionregistrationv1.ValidatingWebhookConfiguration)
+	if !ok {
+		return
+	}
+	logger.Info("ValidatingWebhookConfiguration created", "name", vwc.Name)
+	key := types.NamespacedName{
+		Namespace: vwc.Namespace,
+		Name:      vwc.Name,
+	}
+	queue.AddRateLimited(ctrl.Request{NamespacedName: key})
+}
+
+// Update implements handler.EventHandler.
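+// Note: the ResourceVersion check below skips events where nothing actually
+// changed (for example periodic informer resyncs), so only real modifications
+// are re-enqueued.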
+func (h *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { + oldWebhook, ok := event.ObjectOld.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + newWebhook, ok := event.ObjectNew.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + if newWebhook.ResourceVersion == oldWebhook.ResourceVersion { + return + } + + logger.Info("ValidatingWebhookConfiguration updated", "name", newWebhook.Name, "namespace", newWebhook.Namespace) + key := types.NamespacedName{ + Namespace: newWebhook.Namespace, + Name: newWebhook.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} + +// Delete implements handler.EventHandler. +func (h *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + vwc, ok := event.Object.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + logger.Info("ValidatingWebhookConfiguration deleted", "name", vwc.Name, "namespace", vwc.Namespace) + key := types.NamespacedName{ + Namespace: vwc.Namespace, + Name: vwc.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} + +// Generic implements handler.EventHandler. +func (h *EventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + vwc, ok := event.Object.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + logger.Info("ValidatingWebhookConfiguration generic event", "name", vwc.Name, "namespace", vwc.Namespace) + key := types.NamespacedName{ + Namespace: vwc.Namespace, + Name: vwc.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} diff --git a/internal/metrics/metrcis.go b/internal/metrics/metrcis.go new file mode 100644 index 000000000..825010755 --- /dev/null +++ b/internal/metrics/metrcis.go @@ -0,0 +1,23 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "sigs.k8s.io/controller-runtime/pkg/log" + +var ( + logger = log.Log.WithName("") +) diff --git a/internal/metrics/sparkapplication_metrics.go b/internal/metrics/sparkapplication_metrics.go new file mode 100644 index 000000000..5a52a55d6 --- /dev/null +++ b/internal/metrics/sparkapplication_metrics.go @@ -0,0 +1,386 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +type SparkApplicationMetrics struct { + prefix string + labels []string + jobStartLatencyBuckets []float64 + + count *prometheus.CounterVec + submitCount *prometheus.CounterVec + failedSubmissionCount *prometheus.CounterVec + runningCount *prometheus.GaugeVec + successCount *prometheus.CounterVec + failureCount *prometheus.CounterVec + + successExecutionTimeSeconds *prometheus.SummaryVec + failureExecutionTimeSeconds *prometheus.SummaryVec + + startLatencySeconds *prometheus.SummaryVec + startLatencySecondsHistogram *prometheus.HistogramVec +} + +func NewSparkApplicationMetrics(prefix string, labels []string, jobStartLatencyBuckets []float64) *SparkApplicationMetrics { + validLabels := make([]string, 0, len(labels)) + for _, label := range labels { + validLabel := util.CreateValidMetricNameLabel("", label) + validLabels = append(validLabels, validLabel) + } + + return &SparkApplicationMetrics{ + prefix: prefix, + labels: validLabels, + jobStartLatencyBuckets: jobStartLatencyBuckets, + + count: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationCount), + Help: "Total number of SparkApplication", + }, + validLabels, + ), + submitCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationSubmitCount), + Help: "Total number of submitted SparkApplication", + }, + validLabels, + ), + failedSubmissionCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationFailedSubmissionCount), + Help: "Total number of failed SparkApplication submission", + }, + validLabels, + ), + runningCount: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationRunningCount), + Help: "Total number of running SparkApplication", + }, + validLabels, + ), + successCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationSuccessCount), + Help: "Total number of successful SparkApplication", + }, + validLabels, + ), + failureCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationFailureCount), + Help: "Total number of failed SparkApplication", + }, + validLabels, + ), + successExecutionTimeSeconds: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationSuccessExecutionTimeSeconds), + }, + validLabels, + ), + failureExecutionTimeSeconds: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationFailureExecutionTimeSeconds), + }, + validLabels, + ), + startLatencySeconds: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationStartLatencySeconds), + Help: "Spark App Start Latency via the Operator", + }, + validLabels, + ), + startLatencySecondsHistogram: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: util.CreateValidMetricNameLabel(prefix, 
common.MetricSparkApplicationStartLatencySecondsHistogram), + Help: "Spark App Start Latency counts in buckets via the Operator", + Buckets: jobStartLatencyBuckets, + }, + validLabels, + ), + } +} + +func (m *SparkApplicationMetrics) Register() { + if err := metrics.Registry.Register(m.count); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationCount) + } + if err := metrics.Registry.Register(m.submitCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationSubmitCount) + } + if err := metrics.Registry.Register(m.failedSubmissionCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationFailedSubmissionCount) + } + if err := metrics.Registry.Register(m.runningCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationRunningCount) + } + if err := metrics.Registry.Register(m.successCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationSuccessCount) + } + if err := metrics.Registry.Register(m.failureCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationFailureCount) + } + if err := metrics.Registry.Register(m.successExecutionTimeSeconds); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationSuccessExecutionTimeSeconds) + } + if err := metrics.Registry.Register(m.failureExecutionTimeSeconds); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationFailureExecutionTimeSeconds) + } + if err := metrics.Registry.Register(m.startLatencySeconds); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationStartLatencySeconds) + } + if err := metrics.Registry.Register(m.startLatencySecondsHistogram); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationStartLatencySecondsHistogram) + } +} + +func (m *SparkApplicationMetrics) HandleSparkApplicationCreate(app *v1beta2.SparkApplication) { + state := util.GetApplicationState(app) + + switch state { + case v1beta2.ApplicationStateNew: + m.incCount(app) + case v1beta2.ApplicationStateSubmitted: + m.incSubmitCount(app) + case v1beta2.ApplicationStateFailedSubmission: + m.incFailedSubmissionCount(app) + case v1beta2.ApplicationStateRunning: + m.incRunningCount(app) + case v1beta2.ApplicationStateFailed: + m.incFailureCount(app) + case v1beta2.ApplicationStateCompleted: + m.incSuccessCount(app) + } +} + +func (m *SparkApplicationMetrics) HandleSparkApplicationUpdate(oldApp *v1beta2.SparkApplication, newApp *v1beta2.SparkApplication) { + oldState := util.GetApplicationState(oldApp) + newState := util.GetApplicationState(newApp) + if newState == oldState { + return + } + + switch oldState { + case v1beta2.ApplicationStateRunning: + m.decRunningCount(oldApp) + } + + switch newState { + case v1beta2.ApplicationStateNew: + m.incCount(newApp) + case v1beta2.ApplicationStateSubmitted: + m.incSubmitCount(newApp) + case v1beta2.ApplicationStateFailedSubmission: + m.incFailedSubmissionCount(newApp) + case v1beta2.ApplicationStateRunning: + m.incRunningCount(newApp) + m.observeStartLatencySeconds(newApp) + case 
v1beta2.ApplicationStateCompleted: + m.incSuccessCount(newApp) + m.observeSuccessExecutionTimeSeconds(newApp) + case v1beta2.ApplicationStateFailed: + m.incFailureCount(newApp) + m.observeFailureExecutionTimeSeconds(newApp) + } +} + +func (m *SparkApplicationMetrics) HandleSparkApplicationDelete(app *v1beta2.SparkApplication) { + state := util.GetApplicationState(app) + + switch state { + case v1beta2.ApplicationStateRunning: + m.decRunningCount(app) + } +} + +func (m *SparkApplicationMetrics) incCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + counter, err := m.count.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationCount, "labels", labels) + return + } + + counter.Inc() + logger.V(1).Info("Increased spark application count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incSubmitCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + counter, err := m.submitCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSubmitCount, "labels", labels) + return + } + + counter.Inc() + logger.V(1).Info("Increased spark application submit count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSubmitCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incFailedSubmissionCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + counter, err := m.failedSubmissionCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailedSubmissionCount, "labels", labels) + return + } + + counter.Inc() + logger.V(1).Info("Increased spark application failed submission count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailedSubmissionCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incRunningCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + gauge, err := m.runningCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationRunningCount, "labels", labels) + return + } + + gauge.Inc() + logger.V(1).Info("Increased spark application running count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationRunningCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) decRunningCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + gauge, err := m.runningCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationRunningCount, "labels", labels) + return + } + + gauge.Dec() + logger.V(1).Info("Decreased SparkApplication running count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationRunningCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incSuccessCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) 
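+	// GetMetricWith looks up (and lazily creates) the counter child for this exact
+	// label set; it returns an error if the label names differ from those the
+	// metric vector was registered with.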
+ counter, err := m.successCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessCount, "labels", labels) + return + } + + counter.Inc() + logger.V(1).Info("Increased spark application success count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incFailureCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + counter, err := m.failureCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureCount, "labels", labels) + return + } + + counter.Inc() + logger.V(1).Info("Increased spark application failure count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) observeSuccessExecutionTimeSeconds(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + observer, err := m.successExecutionTimeSeconds.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessExecutionTimeSeconds, "labels", labels) + } + + if app.Status.LastSubmissionAttemptTime.IsZero() || app.Status.TerminationTime.IsZero() { + err := fmt.Errorf("last submission attempt time or termination time is zero") + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessExecutionTimeSeconds, "labels", labels) + return + } + duration := app.Status.TerminationTime.Sub(app.Status.LastSubmissionAttemptTime.Time) + observer.Observe(duration.Seconds()) + logger.V(1).Info("Observed spark application success execution time seconds", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessExecutionTimeSeconds, "labels", labels, "value", duration.Seconds()) +} + +func (m *SparkApplicationMetrics) observeFailureExecutionTimeSeconds(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + observer, err := m.failureExecutionTimeSeconds.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureExecutionTimeSeconds, "labels", labels) + } + + if app.Status.LastSubmissionAttemptTime.IsZero() || app.Status.TerminationTime.IsZero() { + err := fmt.Errorf("last submission attempt time or termination time is zero") + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureExecutionTimeSeconds, "labels", labels) + return + } + duration := app.Status.TerminationTime.Sub(app.Status.LastSubmissionAttemptTime.Time) + observer.Observe(duration.Seconds()) + logger.V(1).Info("Observed spark application failure execution time seconds", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureExecutionTimeSeconds, "labels", labels, "value", duration.Seconds()) +} + +func (m *SparkApplicationMetrics) observeStartLatencySeconds(app *v1beta2.SparkApplication) { + 
+func (m *SparkApplicationMetrics) observeStartLatencySeconds(app *v1beta2.SparkApplication) {
+	// Only export the spark application start latency metric on the first execution attempt.
+	if app.Status.ExecutionAttempts != 1 {
+		return
+	}
+
+	labels := m.getMetricLabels(app)
+	latency := time.Since(app.CreationTimestamp.Time)
+	if observer, err := m.startLatencySeconds.GetMetricWith(labels); err != nil {
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationStartLatencySeconds, "labels", labels)
+	} else {
+		observer.Observe(latency.Seconds())
+		logger.V(1).Info("Observed spark application start latency seconds", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationStartLatencySeconds, "labels", labels, "value", latency.Seconds())
+	}
+
+	if histogram, err := m.startLatencySecondsHistogram.GetMetricWith(labels); err != nil {
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationStartLatencySecondsHistogram, "labels", labels)
+	} else {
+		histogram.Observe(latency.Seconds())
+		logger.V(1).Info("Observed spark application start latency seconds", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationStartLatencySecondsHistogram, "labels", labels, "value", latency.Seconds())
+	}
+}
+
+func (m *SparkApplicationMetrics) getMetricLabels(app *v1beta2.SparkApplication) map[string]string {
+	// Convert the Spark application labels to valid metric labels.
+	validLabels := make(map[string]string)
+	for key, val := range app.Labels {
+		newKey := util.CreateValidMetricNameLabel(m.prefix, key)
+		validLabels[newKey] = val
+	}
+
+	metricLabels := make(map[string]string)
+	for _, label := range m.labels {
+		if _, ok := validLabels[label]; ok {
+			metricLabels[label] = validLabels[label]
+		} else if label == "namespace" {
+			metricLabels[label] = app.Namespace
+		} else {
+			metricLabels[label] = "Unknown"
+		}
+	}
+	return metricLabels
+}
diff --git a/internal/metrics/sparkpod_metrics.go b/internal/metrics/sparkpod_metrics.go
new file mode 100644
index 000000000..91edc96b9
--- /dev/null
+++ b/internal/metrics/sparkpod_metrics.go
@@ -0,0 +1,191 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/metrics" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +type SparkExecutorMetrics struct { + prefix string + labels []string + + runningCount *prometheus.GaugeVec + successCount *prometheus.CounterVec + failureCount *prometheus.CounterVec +} + +func NewSparkExecutorMetrics(prefix string, labels []string) *SparkExecutorMetrics { + validLabels := make([]string, 0, len(labels)) + for _, label := range labels { + validLabel := util.CreateValidMetricNameLabel("", label) + validLabels = append(validLabels, validLabel) + } + + return &SparkExecutorMetrics{ + prefix: prefix, + labels: labels, + + runningCount: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkExecutorRunningCount), + Help: "Total number of running Spark executors", + }, + validLabels, + ), + successCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkExecutorSuccessCount), + Help: "Total number of successful Spark executors", + }, + validLabels, + ), + failureCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkExecutorFailureCount), + Help: "Total number of failed Spark executors", + }, + validLabels, + ), + } +} + +func (m *SparkExecutorMetrics) Register() { + if err := metrics.Registry.Register(m.runningCount); err != nil { + logger.Error(err, "Failed to register spark executor metric", "name", common.MetricSparkExecutorRunningCount) + } + if err := metrics.Registry.Register(m.successCount); err != nil { + logger.Error(err, "Failed to register spark executor metric", "name", common.MetricSparkExecutorSuccessCount) + } + if err := metrics.Registry.Register(m.failureCount); err != nil { + logger.Error(err, "Failed to register spark executor metric", "name", common.MetricSparkExecutorFailureCount) + } +} + +func (m *SparkExecutorMetrics) HandleSparkExecutorCreate(pod *corev1.Pod) { + state := util.GetExecutorState(pod) + switch state { + case v1beta2.ExecutorStateRunning: + m.incRunningCount(pod) + } +} + +func (m *SparkExecutorMetrics) HandleSparkExecutorUpdate(oldPod, newPod *corev1.Pod) { + oldState := util.GetExecutorState(oldPod) + newState := util.GetExecutorState(newPod) + if newState == oldState { + return + } + + switch oldState { + case v1beta2.ExecutorStateRunning: + m.decRunningCount(oldPod) + } + + switch newState { + case v1beta2.ExecutorStateRunning: + m.incRunningCount(newPod) + case v1beta2.ExecutorStateCompleted: + m.incSuccessCount(newPod) + case v1beta2.ExecutorStateFailed: + m.incFailureCount(newPod) + } +} + +func (m *SparkExecutorMetrics) HandleSparkExecutorDelete(pod *corev1.Pod) { + state := util.GetExecutorState(pod) + + switch state { + case v1beta2.ExecutorStateRunning: + m.decRunningCount(pod) + } +} + +func (m *SparkExecutorMetrics) incRunningCount(pod *corev1.Pod) { + labels := m.getMetricLabels(pod) + runningCount, err := m.runningCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for Spark executor", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorRunningCount, "labels", labels) + return + } + + runningCount.Inc() + logger.V(1).Info("Increased Spark executor 
running count", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorRunningCount, "labels", labels) +} + +func (m *SparkExecutorMetrics) decRunningCount(pod *corev1.Pod) { + labels := m.getMetricLabels(pod) + runningCount, err := m.runningCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for Spark executor", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorRunningCount, "labels", labels) + return + } + + runningCount.Dec() + logger.V(1).Info("Decreased Spark executor running count", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorRunningCount, "labels", labels) +} + +func (m *SparkExecutorMetrics) incSuccessCount(pod *corev1.Pod) { + labels := m.getMetricLabels(pod) + successCount, err := m.successCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for Spark executor", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorSuccessCount, "labels", labels) + return + } + + successCount.Inc() + logger.V(1).Info("Increased Spark executor success count", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorSuccessCount, "labels", labels) +} + +func (m *SparkExecutorMetrics) incFailureCount(pod *corev1.Pod) { + labels := m.getMetricLabels(pod) + failureCount, err := m.failureCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for Spark executor", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorFailureCount, "labels", labels) + return + } + + failureCount.Inc() + logger.V(1).Info("Increased Spark executor running count", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorFailureCount, "labels", labels) +} + +func (m *SparkExecutorMetrics) getMetricLabels(pod *corev1.Pod) map[string]string { + // Convert pod metricLabels to valid metric metricLabels. + validLabels := make(map[string]string) + for key, val := range pod.Labels { + newKey := util.CreateValidMetricNameLabel("", key) + validLabels[newKey] = val + } + + metricLabels := make(map[string]string) + for _, label := range m.labels { + if _, ok := validLabels[label]; ok { + metricLabels[label] = validLabels[label] + } else if label == "namespace" { + metricLabels[label] = pod.Namespace + } else { + metricLabels[label] = "Unknown" + } + } + return metricLabels +} diff --git a/internal/scheduler/registry.go b/internal/scheduler/registry.go new file mode 100644 index 000000000..caa4a939e --- /dev/null +++ b/internal/scheduler/registry.go @@ -0,0 +1,75 @@ +/* +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "fmt" + "sync" +) + +var registry *Registry + +// Registry is a registry of scheduler factories. 
diff --git a/internal/scheduler/registry.go b/internal/scheduler/registry.go
new file mode 100644
index 000000000..caa4a939e
--- /dev/null
+++ b/internal/scheduler/registry.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheduler
+
+import (
+	"fmt"
+	"sync"
+)
+
+// registry is initialized eagerly so that GetRegistry is safe to call from multiple goroutines.
+var registry = &Registry{
+	factories: make(map[string]Factory),
+}
+
+// Registry is a registry of scheduler factories.
+type Registry struct {
+	factories map[string]Factory
+
+	mu sync.Mutex
+}
+
+// GetRegistry returns the scheduler registry.
+func GetRegistry() *Registry {
+	return registry
+}
+
+// GetScheduler gets a registered scheduler by name and initializes it with the given config.
+func (r *Registry) GetScheduler(name string, config Config) (Interface, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	factory, exists := r.factories[name]
+	if !exists {
+		return nil, fmt.Errorf("scheduler %s not found", name)
+	}
+
+	return factory(config)
+}
+
+// Register registers a scheduler factory to the registry.
+func (r *Registry) Register(name string, factory Factory) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if _, ok := r.factories[name]; ok {
+		return fmt.Errorf("scheduler %s is already registered", name)
+	}
+
+	r.factories[name] = factory
+	logger.Info("Registered scheduler", "name", name)
+	return nil
+}
+
+// GetRegisteredSchedulerNames gets the registered scheduler names.
+func (r *Registry) GetRegisteredSchedulerNames() []string {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	names := make([]string, 0, len(r.factories))
+	for name := range r.factories {
+		names = append(names, name)
+	}
+	return names
+}
diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go
new file mode 100644
index 000000000..2ab2f07dd
--- /dev/null
+++ b/internal/scheduler/scheduler.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheduler
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+)
+
+var (
+	logger = log.Log.WithName("")
+)
+
+// Interface defines the interface of a batch scheduler.
+type Interface interface {
+	Name() string
+	ShouldSchedule(app *v1beta2.SparkApplication) bool
+	Schedule(app *v1beta2.SparkApplication) error
+	Cleanup(app *v1beta2.SparkApplication) error
+}
+
+// Config defines the configuration of a batch scheduler.
+type Config interface{}
+
+// Factory defines the factory of a batch scheduler.
+type Factory func(config Config) (Interface, error)
diff --git a/internal/scheduler/volcano/scheduler.go b/internal/scheduler/volcano/scheduler.go
new file mode 100644
index 000000000..d75912f1c
--- /dev/null
+++ b/internal/scheduler/volcano/scheduler.go
@@ -0,0 +1,229 @@
+/*
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package volcano + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/log" + "volcano.sh/apis/pkg/apis/scheduling/v1beta1" + volcanoclientset "volcano.sh/apis/pkg/client/clientset/versioned" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/scheduler" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var ( + logger = log.Log.WithName("") +) + +// Scheduler is a batch scheduler that uses Volcano to schedule Spark applications. +type Scheduler struct { + extensionClient apiextensionsclientset.Interface + volcanoClient volcanoclientset.Interface +} + +// Scheduler implements scheduler.Interface. +var _ scheduler.Interface = &Scheduler{} + +// Config defines the configurations of Volcano scheduler. +type Config struct { + RestConfig *rest.Config +} + +// Config implements scheduler.Config. +var _ scheduler.Config = &Config{} + +// Factory creates a new VolcanoScheduler instance. +func Factory(config scheduler.Config) (scheduler.Interface, error) { + c, ok := config.(*Config) + if !ok { + return nil, fmt.Errorf("failed to get volcano scheduler config") + } + + extensionClient, err := apiextensionsclientset.NewForConfig(c.RestConfig) + if err != nil { + return nil, fmt.Errorf("failed to initialize k8s extension client: %v", err) + } + + if _, err := extensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.TODO(), + common.VolcanoPodGroupName, + metav1.GetOptions{}, + ); err != nil { + // For backward compatibility check v1beta1 API version of CustomResourceDefinitions + if _, err := extensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get( + context.TODO(), + common.VolcanoPodGroupName, + metav1.GetOptions{}, + ); err != nil { + return nil, fmt.Errorf("CRD PodGroup does not exist: %v", err) + } + } + + volcanoClient, err := volcanoclientset.NewForConfig(c.RestConfig) + if err != nil { + return nil, fmt.Errorf("failed to initialize volcano client: %v", err) + } + + scheduler := &Scheduler{ + extensionClient: extensionClient, + volcanoClient: volcanoClient, + } + return scheduler, nil +} + +// Name implements batchscheduler.Interface. +func (s *Scheduler) Name() string { + return common.VolcanoSchedulerName +} + +// ShouldSchedule implements batchscheduler.Interface. +func (s *Scheduler) ShouldSchedule(_ *v1beta2.SparkApplication) bool { + // There is no additional requirement for volcano scheduler + return true +} + +// Schedule implements batchscheduler.Interface. +func (s *Scheduler) Schedule(app *v1beta2.SparkApplication) error { + if app.ObjectMeta.Annotations == nil { + app.ObjectMeta.Annotations = make(map[string]string) + } + if app.Spec.Driver.Annotations == nil { + app.Spec.Driver.Annotations = make(map[string]string) + } + if app.Spec.Executor.Annotations == nil { + app.Spec.Executor.Annotations = make(map[string]string) + } + + switch app.Spec.Mode { + case v1beta2.DeployModeClient: + return s.syncPodGroupInClientMode(app) + case v1beta2.DeployModeCluster: + return s.syncPodGroupInClusterMode(app) + } + return nil +} + +// Cleanup implements batchscheduler.Interface. 
+func (s *Scheduler) Cleanup(app *v1beta2.SparkApplication) error {
+	name := getPodGroupName(app)
+	namespace := app.Namespace
+	if err := s.volcanoClient.SchedulingV1beta1().PodGroups(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
+		return err
+	}
+	logger.Info("Deleted PodGroup", "name", name, "namespace", namespace)
+	return nil
+}
+
+func (s *Scheduler) syncPodGroupInClientMode(app *v1beta2.SparkApplication) error {
+	// We only care about the executor pods in client mode.
+	if _, ok := app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey]; !ok {
+		totalResource := util.GetExecutorRequestResource(app)
+
+		if app.Spec.BatchSchedulerOptions != nil && len(app.Spec.BatchSchedulerOptions.Resources) > 0 {
+			totalResource = app.Spec.BatchSchedulerOptions.Resources
+		}
+		if err := s.syncPodGroup(app, 1, totalResource); err == nil {
+			app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey] = getPodGroupName(app)
+		} else {
+			return err
+		}
+	}
+	return nil
+}
+
+func (s *Scheduler) syncPodGroupInClusterMode(app *v1beta2.SparkApplication) error {
+	// We need to mark both the driver and executor pods when submitting.
+	// In cluster mode, the initial size of the PodGroup is set to 1 in order to schedule the driver pod first.
+	if _, ok := app.Spec.Driver.Annotations[v1beta1.KubeGroupNameAnnotationKey]; !ok {
+		// Both driver and executor resources will be considered.
+		totalResource := util.SumResourceList([]corev1.ResourceList{util.GetDriverRequestResource(app), util.GetExecutorRequestResource(app)})
+		if app.Spec.BatchSchedulerOptions != nil && len(app.Spec.BatchSchedulerOptions.Resources) > 0 {
+			totalResource = app.Spec.BatchSchedulerOptions.Resources
+		}
+
+		if err := s.syncPodGroup(app, 1, totalResource); err != nil {
+			return err
+		}
+		app.Spec.Driver.Annotations[v1beta1.KubeGroupNameAnnotationKey] = getPodGroupName(app)
+		app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey] = getPodGroupName(app)
+	}
+	return nil
+}
+
+func (s *Scheduler) syncPodGroup(app *v1beta2.SparkApplication, size int32, minResource corev1.ResourceList) error {
+	var err error
+	var pg *v1beta1.PodGroup
+	name := getPodGroupName(app)
+	namespace := app.Namespace
+
+	if pg, err = s.volcanoClient.SchedulingV1beta1().PodGroups(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+
+		podGroup := v1beta1.PodGroup{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespace,
+				OwnerReferences: []metav1.OwnerReference{
+					*metav1.NewControllerRef(app, v1beta2.SchemeGroupVersion.WithKind("SparkApplication")),
+				},
+			},
+			Spec: v1beta1.PodGroupSpec{
+				MinMember:    size,
+				MinResources: &minResource,
+			},
+			Status: v1beta1.PodGroupStatus{
+				Phase: v1beta1.PodGroupPending,
+			},
+		}
+
+		if app.Spec.BatchSchedulerOptions != nil {
+			// Update the pod group queue if it is specified in the Spark application.
+			if app.Spec.BatchSchedulerOptions.Queue != nil {
+				podGroup.Spec.Queue = *app.Spec.BatchSchedulerOptions.Queue
+			}
+			// Update the pod group priorityClassName if it is specified in the Spark application.
+			if app.Spec.BatchSchedulerOptions.PriorityClassName != nil {
+				podGroup.Spec.PriorityClassName = *app.Spec.BatchSchedulerOptions.PriorityClassName
+			}
+		}
+		_, err = s.volcanoClient.SchedulingV1beta1().PodGroups(namespace).Create(context.TODO(), &podGroup, metav1.CreateOptions{})
+	} else {
+		if pg.Spec.MinMember != size {
+			pg.Spec.MinMember = size
+			_, err = 
s.volcanoClient.SchedulingV1beta1().PodGroups(namespace).Update(context.TODO(), pg, metav1.UpdateOptions{}) + } + } + + if err != nil { + return fmt.Errorf("failed to sync PodGroup with error: %s. Abandon schedule pods via volcano", err) + } + logger.Info("Created PodGroup", "name", name, "namespace", namespace) + + return nil +} diff --git a/pkg/batchscheduler/volcano/volcano_scheduler_test.go b/internal/scheduler/volcano/scheduler_test.go similarity index 80% rename from pkg/batchscheduler/volcano/volcano_scheduler_test.go rename to internal/scheduler/volcano/scheduler_test.go index 1587ef106..fb6b8caed 100644 --- a/pkg/batchscheduler/volcano/volcano_scheduler_test.go +++ b/internal/scheduler/volcano/scheduler_test.go @@ -14,32 +14,32 @@ See the License for the specific language governing permissions and limitations under the License. */ -package volcano +package volcano_test import ( "testing" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/util" ) func TestGetDriverResource(t *testing.T) { - var oneCore int32 = 1 oneCoreStr := "1" oneGB := "1024m" twoCoresStr := "2" - result := v1.ResourceList{} - result[v1.ResourceCPU] = resource.MustParse("1") - result[v1.ResourceMemory] = resource.MustParse("2048m") + result := corev1.ResourceList{} + result[corev1.ResourceCPU] = resource.MustParse("1") + result[corev1.ResourceMemory] = resource.MustParse("2048m") - testcases := []struct { + testCases := []struct { Name string app v1beta2.SparkApplication - result v1.ResourceList + result corev1.ResourceList }{ { Name: "Validate Core and memory", @@ -74,10 +74,10 @@ func TestGetDriverResource(t *testing.T) { }, } - for _, testcase := range testcases { - t.Run(testcase.Name, func(t *testing.T) { - r := getDriverRequestResource(&testcase.app) - for name, quantity := range testcase.result { + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + r := util.GetDriverRequestResource(&tc.app) + for name, quantity := range tc.result { if actual, ok := r[name]; !ok { t.Errorf("expecting driver pod to have resource %s, while get none", name) } else { @@ -99,14 +99,14 @@ func TestGetExecutorResource(t *testing.T) { twoCores := int32(2) instances := int32(2) - result := v1.ResourceList{} - result[v1.ResourceCPU] = resource.MustParse("2") - result[v1.ResourceMemory] = resource.MustParse("4096m") + result := corev1.ResourceList{} + result[corev1.ResourceCPU] = resource.MustParse("2") + result[corev1.ResourceMemory] = resource.MustParse("4096m") - testcases := []struct { + testCases := []struct { Name string app v1beta2.SparkApplication - result v1.ResourceList + result corev1.ResourceList }{ { Name: "Validate Core and memory", @@ -159,10 +159,10 @@ func TestGetExecutorResource(t *testing.T) { }, } - for _, testcase := range testcases { - t.Run(testcase.Name, func(t *testing.T) { - r := getExecutorRequestResource(&testcase.app) - for name, quantity := range testcase.result { + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + r := util.GetExecutorRequestResource(&tc.app) + for name, quantity := range tc.result { if actual, ok := r[name]; !ok { t.Errorf("expecting executor pod to have resource %s, while get none", name) } else { diff --git a/pkg/util/array_flag.go b/internal/scheduler/volcano/util.go similarity index 70% rename from pkg/util/array_flag.go rename to 
internal/scheduler/volcano/util.go
index 730db6975..75fe54c3b 100644
--- a/pkg/util/array_flag.go
+++ b/internal/scheduler/volcano/util.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,17 +14,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package util
+package volcano
 
-import "fmt"
+import (
+	"fmt"
 
-type ArrayFlags []string
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+)
 
-func (a *ArrayFlags) String() string {
-	return fmt.Sprint(*a)
-}
-
-func (a *ArrayFlags) Set(value string) error {
-	*a = append(*a, value)
-	return nil
+func getPodGroupName(app *v1beta2.SparkApplication) string {
+	return fmt.Sprintf("spark-%s-pg", app.Name)
 }
diff --git a/pkg/webhook/doc.go b/internal/webhook/doc.go
similarity index 100%
rename from pkg/webhook/doc.go
rename to internal/webhook/doc.go
diff --git a/internal/webhook/resourcequota.go b/internal/webhook/resourcequota.go
new file mode 100644
index 000000000..5e5109846
--- /dev/null
+++ b/internal/webhook/resourcequota.go
@@ -0,0 +1,259 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+var javaStringSuffixes = map[string]int64{
+	"b":  1,
+	"kb": 1 << 10,
+	"k":  1 << 10,
+	"mb": 1 << 20,
+	"m":  1 << 20,
+	"gb": 1 << 30,
+	"g":  1 << 30,
+	"tb": 1 << 40,
+	"t":  1 << 40,
+	"pb": 1 << 50,
+	"p":  1 << 50,
+}
+
+var javaStringPattern = regexp.MustCompile(`([0-9]+)([a-z]+)?`)
+var javaFractionStringPattern = regexp.MustCompile(`([0-9]+\.[0-9]+)([a-z]+)?`)
+
+// getResourceList returns the combined resource requests and limits of the given SparkApplication.
+func getResourceList(app *v1beta2.SparkApplication) (corev1.ResourceList, error) {
+	coresRequests, err := getCoresRequests(app)
+	if err != nil {
+		return nil, err
+	}
+
+	coresLimits, err := getCoresLimits(app)
+	if err != nil {
+		return nil, err
+	}
+
+	memoryRequests, err := getMemoryRequests(app)
+	if err != nil {
+		return nil, err
+	}
+
+	memoryLimits, err := getMemoryLimits(app)
+	if err != nil {
+		return nil, err
+	}
+
+	resourceList := util.SumResourceList([]corev1.ResourceList{
+		coresRequests,
+		coresLimits,
+		memoryRequests,
+		memoryLimits,
+	})
+
+	return resourceList, nil
+}
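[Editorial note: a worked example of the Java-style memory parsing these helpers rely on. `parseJavaMemoryString` is defined later in this file, with the fraction pattern tried first; the values follow the `javaStringSuffixes` table above. Standalone sketch, not part of the patch:]

```go
b, _ := parseJavaMemoryString("512m")   // 512 * (1 << 20) = 536870912 bytes
f, _ := parseJavaMemoryString("1.5gb")  // 1.5 * (1 << 30) = 1610612736 bytes
_, err := parseJavaMemoryString("oops") // error: not a Java-style memory value
```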
+func getCoresRequests(app *v1beta2.SparkApplication) (corev1.ResourceList, error) {
+	// Calculate driver cores requests.
+	driverCoresRequests, err := getSparkPodCoresRequests(&app.Spec.Driver.SparkPodSpec, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	// Calculate executor cores requests.
+	var replicas int64 = 1
+	if app.Spec.Executor.Instances != nil {
+		replicas = int64(*app.Spec.Executor.Instances)
+	}
+	executorCoresRequests, err := getSparkPodCoresRequests(&app.Spec.Executor.SparkPodSpec, replicas)
+	if err != nil {
+		return nil, err
+	}
+
+	return util.SumResourceList([]corev1.ResourceList{driverCoresRequests, executorCoresRequests}), nil
+}
+
+func getSparkPodCoresRequests(podSpec *v1beta2.SparkPodSpec, replicas int64) (corev1.ResourceList, error) {
+	var milliCores int64
+	if podSpec.Cores != nil {
+		milliCores = int64(*podSpec.Cores) * 1000
+	} else {
+		milliCores = common.DefaultCPUMilliCores
+	}
+	resourceList := corev1.ResourceList{
+		corev1.ResourceCPU:         *resource.NewMilliQuantity(milliCores*replicas, resource.DecimalSI),
+		corev1.ResourceRequestsCPU: *resource.NewMilliQuantity(milliCores*replicas, resource.DecimalSI),
+	}
+	return resourceList, nil
+}
+
+func getCoresLimits(app *v1beta2.SparkApplication) (corev1.ResourceList, error) {
+	// Calculate driver cores limits.
+	driverCoresLimits, err := getSparkPodCoresLimits(&app.Spec.Driver.SparkPodSpec, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	// Calculate executor cores limits.
+	var replicas int64 = 1
+	if app.Spec.Executor.Instances != nil {
+		replicas = int64(*app.Spec.Executor.Instances)
+	}
+	executorCoresLimits, err := getSparkPodCoresLimits(&app.Spec.Executor.SparkPodSpec, replicas)
+	if err != nil {
+		return nil, err
+	}
+
+	return util.SumResourceList([]corev1.ResourceList{driverCoresLimits, executorCoresLimits}), nil
+}
+
+func getSparkPodCoresLimits(podSpec *v1beta2.SparkPodSpec, replicas int64) (corev1.ResourceList, error) {
+	var milliCores int64
+	if podSpec.CoreLimit != nil {
+		quantity, err := resource.ParseQuantity(*podSpec.CoreLimit)
+		if err != nil {
+			return nil, err
+		}
+		milliCores = quantity.MilliValue()
+	} else if podSpec.Cores != nil {
+		milliCores = int64(*podSpec.Cores) * 1000
+	} else {
+		milliCores = common.DefaultCPUMilliCores
+	}
+	resourceList := corev1.ResourceList{
+		corev1.ResourceLimitsCPU: *resource.NewMilliQuantity(milliCores*replicas, resource.DecimalSI),
+	}
+	return resourceList, nil
+}
+
+func getMemoryRequests(app *v1beta2.SparkApplication) (corev1.ResourceList, error) {
+	// If the memory overhead factor is set, use it. Otherwise, use the default value.
+	var memoryOverheadFactor float64
+	if app.Spec.MemoryOverheadFactor != nil {
+		parsed, err := strconv.ParseFloat(*app.Spec.MemoryOverheadFactor, 64)
+		if err != nil {
+			return nil, err
+		}
+		memoryOverheadFactor = parsed
+	} else if app.Spec.Type == v1beta2.SparkApplicationTypeJava {
+		memoryOverheadFactor = common.DefaultJVMMemoryOverheadFactor
+	} else {
+		memoryOverheadFactor = common.DefaultNonJVMMemoryOverheadFactor
+	}
+
+	// Calculate driver pod memory requests.
+	driverResourceList, err := getSparkPodMemoryRequests(&app.Spec.Driver.SparkPodSpec, memoryOverheadFactor, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	// Calculate executor pod memory requests.
+	var replicas int64 = 1
+	if app.Spec.Executor.Instances != nil {
+		replicas = int64(*app.Spec.Executor.Instances)
+	}
+	executorResourceList, err := getSparkPodMemoryRequests(&app.Spec.Executor.SparkPodSpec, memoryOverheadFactor, replicas)
+	if err != nil {
+		return nil, err
+	}
+
+	return util.SumResourceList([]corev1.ResourceList{driverResourceList, executorResourceList}), nil
+}
+
+func getSparkPodMemoryRequests(podSpec *v1beta2.SparkPodSpec, memoryOverheadFactor float64, replicas int64) (corev1.ResourceList, error) {
+	var memoryBytes, memoryOverheadBytes int64
+	if podSpec.Memory != nil {
+		parsed, err := parseJavaMemoryString(*podSpec.Memory)
+		if err != nil {
+			return nil, err
+		}
+		memoryBytes = parsed
+	}
+
+	if podSpec.MemoryOverhead != nil {
+		parsed, err := parseJavaMemoryString(*podSpec.MemoryOverhead)
+		if err != nil {
+			return nil, err
+		}
+		memoryOverheadBytes = parsed
+	} else {
+		memoryOverheadBytes = int64(math.Max(float64(memoryBytes)*memoryOverheadFactor, common.MinMemoryOverhead))
+	}
+
+	resourceList := corev1.ResourceList{
+		corev1.ResourceMemory:         *resource.NewQuantity((memoryBytes+memoryOverheadBytes)*replicas, resource.BinarySI),
+		corev1.ResourceRequestsMemory: *resource.NewQuantity((memoryBytes+memoryOverheadBytes)*replicas, resource.BinarySI),
+	}
+	return resourceList, nil
+}
+
+// For Spark pods, memory requests and limits are the same.
+func getMemoryLimits(app *v1beta2.SparkApplication) (corev1.ResourceList, error) {
+	return getMemoryRequests(app)
+}
+
+// Logic copied from https://github.com/apache/spark/blob/5264164a67df498b73facae207eda12ee133be7d/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java#L276
+func parseJavaMemoryString(s string) (int64, error) {
+	lower := strings.ToLower(s)
+	// Try the fractional pattern first: the integer pattern would otherwise match the digits
+	// before the decimal point, and values like "1.5mb" would be rejected.
+	if matches := javaFractionStringPattern.FindStringSubmatch(lower); matches != nil {
+		value, err := strconv.ParseFloat(matches[1], 64)
+		if err != nil {
+			return 0, err
+		}
+		suffix := matches[2]
+		if multiplier, present := javaStringSuffixes[suffix]; present {
+			return int64(float64(multiplier) * value), nil
+		}
+	} else if matches = javaStringPattern.FindStringSubmatch(lower); matches != nil {
+		value, err := strconv.ParseInt(matches[1], 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		suffix := matches[2]
+		if multiplier, present := javaStringSuffixes[suffix]; present {
+			return multiplier * value, nil
+		}
+	}
+	return 0, fmt.Errorf("could not parse string '%s' as a Java-style memory value. Examples: 100kb, 1.5mb, 1g", s)
+}
+
+// Check whether the resource list will satisfy the resource quota.
+func validateResourceQuota(resourceList corev1.ResourceList, resourceQuota corev1.ResourceQuota) bool {
+	for key, quantity := range resourceList {
+		if _, ok := resourceQuota.Status.Hard[key]; !ok {
+			continue
+		}
+		quantity.Add(resourceQuota.Status.Used[key])
+		if quantity.Cmp(resourceQuota.Spec.Hard[key]) > 0 {
+			return false
+		}
+	}
+	return true
+}
diff --git a/internal/webhook/resourcequota_test.go b/internal/webhook/resourcequota_test.go
new file mode 100644
index 000000000..285e4841f
--- /dev/null
+++ b/internal/webhook/resourcequota_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"testing"
+)
+
+func assertMemory(memoryString string, expectedBytes int64, t *testing.T) {
+	m, err := parseJavaMemoryString(memoryString)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	if m != expectedBytes {
+		t.Errorf("%s: expected %v bytes, got %v bytes", memoryString, expectedBytes, m)
+		return
+	}
+}
+
+func TestJavaMemoryString(t *testing.T) {
+	assertMemory("1b", 1, t)
+	assertMemory("100k", 100*1024, t)
+	assertMemory("1gb", 1024*1024*1024, t)
+	assertMemory("10TB", 10*1024*1024*1024*1024, t)
+	assertMemory("10PB", 10*1024*1024*1024*1024*1024, t)
+}
diff --git a/internal/webhook/scheduledsparkapplication_defaulter.go b/internal/webhook/scheduledsparkapplication_defaulter.go
new file mode 100644
index 000000000..afdf4304e
--- /dev/null
+++ b/internal/webhook/scheduledsparkapplication_defaulter.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+)
+
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups=sparkoperator.k8s.io,matchPolicy=Exact,mutating=true,name=mutate-scheduledsparkapplication.sparkoperator.k8s.io,path=/mutate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication,reinvocationPolicy=Never,resources=scheduledsparkapplications,sideEffects=None,verbs=create;update,versions=v1beta2,webhookVersions=v1
+
+// ScheduledSparkApplicationDefaulter sets default values for a ScheduledSparkApplication.
+type ScheduledSparkApplicationDefaulter struct{}
+
+// NewScheduledSparkApplicationDefaulter creates a new ScheduledSparkApplicationDefaulter instance.
+func NewScheduledSparkApplicationDefaulter() *ScheduledSparkApplicationDefaulter {
+	return &ScheduledSparkApplicationDefaulter{}
+}
+
+// ScheduledSparkApplicationDefaulter implements admission.CustomDefaulter.
+var _ admission.CustomDefaulter = &ScheduledSparkApplicationDefaulter{}
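[Editorial note: for context, a custom defaulter/validator pair like this is typically hooked up through the controller-runtime webhook builder. A minimal sketch assuming a manager `mgr`; the actual wiring lives elsewhere in this patch (the webhook start command), so treat this as illustrative rather than the operator's exact code:]

```go
// ctrl is "sigs.k8s.io/controller-runtime".
err := ctrl.NewWebhookManagedBy(mgr).
	For(&v1beta2.ScheduledSparkApplication{}).
	WithDefaulter(NewScheduledSparkApplicationDefaulter()).
	WithValidator(NewScheduledSparkApplicationValidator()).
	Complete()
```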
+// Default implements admission.CustomDefaulter.
+func (d *ScheduledSparkApplicationDefaulter) Default(ctx context.Context, obj runtime.Object) error {
+	app, ok := obj.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return nil
+	}
+	logger.Info("Defaulting ScheduledSparkApplication", "name", app.Name, "namespace", app.Namespace)
+	return nil
+}
diff --git a/internal/webhook/scheduledsparkapplication_validator.go b/internal/webhook/scheduledsparkapplication_validator.go
new file mode 100644
index 000000000..4f3f19b9c
--- /dev/null
+++ b/internal/webhook/scheduledsparkapplication_validator.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+)
+
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing it to an invalid value can cause API server errors, because the API server will fail to locate the webhook.
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups=sparkoperator.k8s.io,matchPolicy=Exact,mutating=false,name=validate-scheduledsparkapplication.sparkoperator.k8s.io,path=/validate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication,reinvocationPolicy=Never,resources=scheduledsparkapplications,sideEffects=None,verbs=create;update,versions=v1beta2,webhookVersions=v1
+
+type ScheduledSparkApplicationValidator struct{}
+
+// NewScheduledSparkApplicationValidator creates a new ScheduledSparkApplicationValidator instance.
+func NewScheduledSparkApplicationValidator() *ScheduledSparkApplicationValidator {
+	return &ScheduledSparkApplicationValidator{}
+}
+
+var _ admission.CustomValidator = &ScheduledSparkApplicationValidator{}
+
+// ValidateCreate implements admission.CustomValidator.
+func (v *ScheduledSparkApplicationValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	app, ok := obj.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating ScheduledSparkApplication create", "name", app.Name, "namespace", app.Namespace)
+	if err := v.validate(app); err != nil {
+		return nil, err
+	}
+	return nil, nil
+}
+
+// ValidateUpdate implements admission.CustomValidator.
+func (v *ScheduledSparkApplicationValidator) ValidateUpdate(ctx context.Context, oldObj runtime.Object, newObj runtime.Object) (warnings admission.Warnings, err error) {
+	newApp, ok := newObj.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating ScheduledSparkApplication update", "name", newApp.Name, "namespace", newApp.Namespace)
+	if err := v.validate(newApp); err != nil {
+		return nil, err
+	}
+	return nil, nil
+}
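[Editorial note: a quick sketch of exercising the validator directly, e.g. from a unit test; the test itself is illustrative and not part of this patch:]

```go
func TestScheduledSparkApplicationValidateCreate(t *testing.T) {
	v := NewScheduledSparkApplicationValidator()
	app := &v1beta2.ScheduledSparkApplication{}
	// validate() is currently a stub, so create is always admitted.
	if _, err := v.ValidateCreate(context.Background(), app); err != nil {
		t.Errorf("expected create to be admitted, got: %v", err)
	}
}
```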
+// ValidateDelete implements admission.CustomValidator.
+func (v *ScheduledSparkApplicationValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	app, ok := obj.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating ScheduledSparkApplication delete", "name", app.Name, "namespace", app.Namespace)
+	return nil, nil
+}
+
+func (v *ScheduledSparkApplicationValidator) validate(_ *v1beta2.ScheduledSparkApplication) error {
+	// TODO: implement validate logic
+	return nil
+}
diff --git a/internal/webhook/sparkapplication_defaulter.go b/internal/webhook/sparkapplication_defaulter.go
new file mode 100644
index 000000000..661ecf708
--- /dev/null
+++ b/internal/webhook/sparkapplication_defaulter.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+	"strconv"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups=sparkoperator.k8s.io,matchPolicy=Exact,mutating=true,name=mutate-sparkapplication.sparkoperator.k8s.io,path=/mutate-sparkoperator-k8s-io-v1beta2-sparkapplication,reinvocationPolicy=Never,resources=sparkapplications,sideEffects=None,verbs=create;update,versions=v1beta2,webhookVersions=v1
+
+// SparkApplicationDefaulter sets default values for a SparkApplication.
+type SparkApplicationDefaulter struct{}
+
+// NewSparkApplicationDefaulter creates a new SparkApplicationDefaulter instance.
+func NewSparkApplicationDefaulter() *SparkApplicationDefaulter {
+	return &SparkApplicationDefaulter{}
+}
+
+// SparkApplicationDefaulter implements admission.CustomDefaulter.
+var _ admission.CustomDefaulter = &SparkApplicationDefaulter{}
+
+// Default implements admission.CustomDefaulter.
+func (d *SparkApplicationDefaulter) Default(ctx context.Context, obj runtime.Object) error {
+	app, ok := obj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil
+	}
+
+	// Only set the default values for spark applications in the New or Invalidating state.
+	state := util.GetApplicationState(app)
+	if state != v1beta2.ApplicationStateNew && state != v1beta2.ApplicationStateInvalidating {
+		return nil
+	}
+
+	logger.Info("Defaulting SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", state)
+	defaultSparkApplication(app)
+	return nil
+}
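[Editorial note: to make the defaulting behavior concrete, a small illustration of what an empty spec gains, per the helpers below (standalone sketch):]

```go
app := &v1beta2.SparkApplication{}
defaultSparkApplication(app)
// app.Spec.Mode               == v1beta2.DeployModeCluster
// app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyNever
// *app.Spec.Driver.Cores      == 1, *app.Spec.Driver.Memory   == "1g"
// *app.Spec.Executor.Cores    == 1, *app.Spec.Executor.Memory == "1g"
// *app.Spec.Executor.Instances == 1 (dynamic allocation is disabled here)
```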
+// defaultSparkApplication sets default values for certain fields of a SparkApplication.
+func defaultSparkApplication(app *v1beta2.SparkApplication) {
+	if app.Spec.Mode == "" {
+		app.Spec.Mode = v1beta2.DeployModeCluster
+	}
+
+	if app.Spec.RestartPolicy.Type == "" {
+		app.Spec.RestartPolicy.Type = v1beta2.RestartPolicyNever
+	}
+
+	if app.Spec.RestartPolicy.Type != v1beta2.RestartPolicyNever {
+		if app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval == nil {
+			app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = util.Int64Ptr(5)
+		}
+		if app.Spec.RestartPolicy.OnFailureRetryInterval == nil {
+			app.Spec.RestartPolicy.OnFailureRetryInterval = util.Int64Ptr(5)
+		}
+	}
+
+	defaultDriverSpec(app)
+	defaultExecutorSpec(app)
+}
+
+func defaultDriverSpec(app *v1beta2.SparkApplication) {
+	if app.Spec.Driver.Cores == nil {
+		if app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkDriverCores] == "" {
+			app.Spec.Driver.Cores = util.Int32Ptr(1)
+		}
+	}
+
+	if app.Spec.Driver.Memory == nil {
+		if app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkDriverMemory] == "" {
+			app.Spec.Driver.Memory = util.StringPtr("1g")
+		}
+	}
+}
+
+func defaultExecutorSpec(app *v1beta2.SparkApplication) {
+	if app.Spec.Executor.Cores == nil {
+		if app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkExecutorCores] == "" {
+			app.Spec.Executor.Cores = util.Int32Ptr(1)
+		}
+	}
+
+	if app.Spec.Executor.Memory == nil {
+		if app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkExecutorMemory] == "" {
+			app.Spec.Executor.Memory = util.StringPtr("1g")
+		}
+	}
+
+	if app.Spec.Executor.Instances == nil {
+		// Check whether dynamic allocation is enabled in the application spec.
+		enableDynamicAllocation := app.Spec.DynamicAllocation != nil && app.Spec.DynamicAllocation.Enabled
+		// Check whether dynamic allocation is enabled in the spark conf.
+		if !enableDynamicAllocation && app.Spec.SparkConf != nil {
+			if dynamicConf, _ := strconv.ParseBool(app.Spec.SparkConf[common.SparkDynamicAllocationEnabled]); dynamicConf {
+				enableDynamicAllocation = true
+			}
+		}
+		// Only default the executor instances when dynamic allocation is disabled and the
+		// instance count is not set in the spark conf. (Note this is hoisted out of the
+		// SparkConf nil-check so that applications without a SparkConf are defaulted too.)
+		if !enableDynamicAllocation && (app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkExecutorInstances] == "") {
+			app.Spec.Executor.Instances = util.Int32Ptr(1)
+		}
+	}
+}
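[Editorial note: one edge worth making explicit in `defaultExecutorSpec`: when dynamic allocation is enabled, the executor instance count is deliberately left unset so Spark can scale it. A short illustration:]

```go
app := &v1beta2.SparkApplication{
	Spec: v1beta2.SparkApplicationSpec{
		DynamicAllocation: &v1beta2.DynamicAllocation{Enabled: true},
	},
}
defaultExecutorSpec(app)
// app.Spec.Executor.Instances remains nil; Spark manages the executor count.
```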
diff --git a/internal/webhook/sparkapplication_validator.go b/internal/webhook/sparkapplication_validator.go
new file mode 100644
index 000000000..7b1fd4108
--- /dev/null
+++ b/internal/webhook/sparkapplication_validator.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing it to an invalid value can cause API server errors, because the API server will fail to locate the webhook.
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups=sparkoperator.k8s.io,matchPolicy=Exact,mutating=false,name=validate-sparkapplication.sparkoperator.k8s.io,path=/validate-sparkoperator-k8s-io-v1beta2-sparkapplication,reinvocationPolicy=Never,resources=sparkapplications,sideEffects=None,verbs=create;update,versions=v1beta2,webhookVersions=v1
+
+type SparkApplicationValidator struct {
+	client client.Client
+
+	enableResourceQuotaEnforcement bool
+}
+
+// NewSparkApplicationValidator creates a new SparkApplicationValidator instance.
+func NewSparkApplicationValidator(client client.Client, enableResourceQuotaEnforcement bool) *SparkApplicationValidator {
+	return &SparkApplicationValidator{
+		client: client,
+
+		enableResourceQuotaEnforcement: enableResourceQuotaEnforcement,
+	}
+}
+
+var _ admission.CustomValidator = &SparkApplicationValidator{}
+
+// ValidateCreate implements admission.CustomValidator.
+func (v *SparkApplicationValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	app, ok := obj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating SparkApplication create", "name", app.Name, "namespace", app.Namespace, "state", util.GetApplicationState(app))
+	if err := v.validateSpec(ctx, app); err != nil {
+		return nil, err
+	}
+
+	// Validate SparkApplication resource usage when resource quota enforcement is enabled.
+	if v.enableResourceQuotaEnforcement {
+		if err := v.validateResourceUsage(ctx, app); err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// ValidateUpdate implements admission.CustomValidator.
+func (v *SparkApplicationValidator) ValidateUpdate(ctx context.Context, oldObj runtime.Object, newObj runtime.Object) (warnings admission.Warnings, err error) {
+	oldApp, ok := oldObj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil, nil
+	}
+
+	newApp, ok := newObj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil, nil
+	}
+
+	logger.Info("Validating SparkApplication update", "name", newApp.Name, "namespace", newApp.Namespace)
+
+	// Skip validation when the spec does not change.
+	if equality.Semantic.DeepEqual(oldApp.Spec, newApp.Spec) {
+		return nil, nil
+	}
+
+	if err := v.validateSpec(ctx, newApp); err != nil {
+		return nil, err
+	}
+
+	// Validate SparkApplication resource usage when resource quota enforcement is enabled.
+	if v.enableResourceQuotaEnforcement {
+		if err := v.validateResourceUsage(ctx, newApp); err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
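[Editorial note: the quota check used by `validateResourceUsage` adds the application's computed requests to the quota's current usage and compares against the hard limit. A minimal illustration of `validateResourceQuota` with made-up values:]

```go
quota := corev1.ResourceQuota{
	Spec: corev1.ResourceQuotaSpec{
		Hard: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")},
	},
	Status: corev1.ResourceQuotaStatus{
		Hard: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")},
		Used: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")},
	},
}
requests := corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("3")}
ok := validateResourceQuota(requests, quota) // false: 3 requested + 2 used > 4 hard
```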
+// ValidateDelete implements admission.CustomValidator.
+func (v *SparkApplicationValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	app, ok := obj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating SparkApplication delete", "name", app.Name, "namespace", app.Namespace, "state", util.GetApplicationState(app))
+	return nil, nil
+}
+
+func (v *SparkApplicationValidator) validateSpec(_ context.Context, app *v1beta2.SparkApplication) error {
+	logger.V(1).Info("Validating SparkApplication spec", "name", app.Name, "namespace", app.Namespace, "state", util.GetApplicationState(app))
+
+	if app.Spec.NodeSelector != nil && (app.Spec.Driver.NodeSelector != nil || app.Spec.Executor.NodeSelector != nil) {
+		return fmt.Errorf("node selector cannot be defined at both SparkApplication and Driver/Executor levels")
+	}
+
+	servicePorts := make(map[int32]bool)
+	ingressURLFormats := make(map[string]bool)
+	for _, item := range app.Spec.DriverIngressOptions {
+		if item.ServicePort == nil {
+			return fmt.Errorf("DriverIngressOptions has nil ServicePort")
+		}
+		if servicePorts[*item.ServicePort] {
+			return fmt.Errorf("DriverIngressOptions has duplicate ServicePort: %d", *item.ServicePort)
+		}
+		servicePorts[*item.ServicePort] = true
+
+		if item.IngressURLFormat == "" {
+			return fmt.Errorf("DriverIngressOptions has empty IngressURLFormat")
+		}
+		if ingressURLFormats[item.IngressURLFormat] {
+			return fmt.Errorf("DriverIngressOptions has duplicate IngressURLFormat: %s", item.IngressURLFormat)
+		}
+		ingressURLFormats[item.IngressURLFormat] = true
+	}
+
+	return nil
+}
+
+func (v *SparkApplicationValidator) validateResourceUsage(ctx context.Context, app *v1beta2.SparkApplication) error {
+	logger.V(1).Info("Validating SparkApplication resource usage", "name", app.Name, "namespace", app.Namespace, "state", util.GetApplicationState(app))
+
+	requests, err := getResourceList(app)
+	if err != nil {
+		return fmt.Errorf("failed to calculate resource requests: %v", err)
+	}
+
+	resourceQuotaList := &corev1.ResourceQuotaList{}
+	if err := v.client.List(ctx, resourceQuotaList, client.InNamespace(app.Namespace)); err != nil {
+		return fmt.Errorf("failed to list resource quotas: %v", err)
+	}
+
+	for _, resourceQuota := range resourceQuotaList.Items {
+		// Scope selectors are not currently supported; ignore any ResourceQuota that does not match everything.
+		// TODO: Add support for scope selectors.
+		if resourceQuota.Spec.ScopeSelector != nil || len(resourceQuota.Spec.Scopes) > 0 {
+			continue
+		}
+
+		if !validateResourceQuota(requests, resourceQuota) {
+			return fmt.Errorf("failed to validate resource quota \"%s/%s\"", resourceQuota.Namespace, resourceQuota.Name)
+		}
+	}
+
+	return nil
+}
diff --git a/internal/webhook/sparkpod_defaulter.go b/internal/webhook/sparkpod_defaulter.go
new file mode 100644
index 000000000..c7a7a858c
--- /dev/null
+++ b/internal/webhook/sparkpod_defaulter.go
@@ -0,0 +1,732 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package webhook + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +const ( + maxNameLength = 63 +) + +// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups="",matchPolicy=Exact,mutating=true,name=mutate-pod.sparkoperator.k8s.io,path=/mutate--v1-pod,reinvocationPolicy=Never,resources=pods,sideEffects=None,verbs=create;update,versions=v1,webhookVersions=v1 + +// SparkPodDefaulter defaults Spark pods. +type SparkPodDefaulter struct { + client client.Client + sparkJobNamespaces map[string]bool +} + +// SparkPodDefaulter implements admission.CustomDefaulter. +var _ admission.CustomDefaulter = &SparkPodDefaulter{} + +// NewSparkPodDefaulter creates a new SparkPodDefaulter instance. +func NewSparkPodDefaulter(client client.Client, sparkJobNamespaces []string) *SparkPodDefaulter { + m := make(map[string]bool) + for _, ns := range sparkJobNamespaces { + m[ns] = true + } + + return &SparkPodDefaulter{ + client: client, + sparkJobNamespaces: m, + } +} + +// Default implements admission.CustomDefaulter. +func (d *SparkPodDefaulter) Default(ctx context.Context, obj runtime.Object) error { + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil + } + + namespace := pod.Namespace + if !d.isSparkJobNamespace(namespace) { + return nil + } + + appName := pod.Labels[common.LabelSparkAppName] + if appName == "" { + return nil + } + + app := &v1beta2.SparkApplication{} + if err := d.client.Get(ctx, types.NamespacedName{Name: appName, Namespace: namespace}, app); err != nil { + return fmt.Errorf("failed to get SparkApplication %s/%s: %v", namespace, appName, err) + } + + logger.Info("Mutating Spark pod", "name", pod.Name, "namespace", namespace, "phase", pod.Status.Phase) + if err := mutateSparkPod(pod, app); err != nil { + logger.Info("Denying Spark pod", "name", pod.Name, "namespace", namespace, "errorMessage", err.Error()) + return fmt.Errorf("failed to mutate Spark pod: %v", err) + } + + return nil +} + +func (d *SparkPodDefaulter) isSparkJobNamespace(ns string) bool { + return d.sparkJobNamespaces[ns] +} + +type mutateSparkPodOption func(pod *corev1.Pod, app *v1beta2.SparkApplication) error + +func mutateSparkPod(pod *corev1.Pod, app *v1beta2.SparkApplication) error { + options := []mutateSparkPodOption{ + addOwnerReference, + addEnvVars, + addEnvFrom, + addHadoopConfigMap, + addSparkConfigMap, + addGeneralConfigMaps, + addVolumes, + addContainerPorts, + addHostNetwork, + addHostAliases, + addInitContainers, + addSidecarContainers, + addDNSConfig, + addPriorityClassName, + addSchedulerName, + addNodeSelectors, + addAffinity, + addTolerations, + addGPU, + addPrometheusConfig, + addContainerSecurityContext, + addPodSecurityContext, + addTerminationGracePeriodSeconds, + addPodLifeCycleConfig, + addShareProcessNamespace, + } + + for _, option := range options { + if err := option(pod, app); err != nil { + return err + } + } + + return nil +} + +func addOwnerReference(pod *corev1.Pod, app *v1beta2.SparkApplication) error { + if !util.IsDriverPod(pod) { + return nil + } + ownerReference := util.GetOwnerReference(app) + pod.ObjectMeta.OwnerReferences = append(pod.ObjectMeta.OwnerReferences, 
ownerReference)
+	return nil
+}
+
+func addVolumes(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	volumes := app.Spec.Volumes
+
+	volumeMap := make(map[string]corev1.Volume)
+	for _, v := range volumes {
+		volumeMap[v.Name] = v
+	}
+
+	var volumeMounts []corev1.VolumeMount
+	if util.IsDriverPod(pod) {
+		volumeMounts = app.Spec.Driver.VolumeMounts
+	} else if util.IsExecutorPod(pod) {
+		volumeMounts = app.Spec.Executor.VolumeMounts
+	}
+
+	addedVolumeMap := make(map[string]corev1.Volume)
+	for _, m := range volumeMounts {
+		// Skip adding localDirVolumes.
+		if strings.HasPrefix(m.Name, common.SparkLocalDirVolumePrefix) {
+			continue
+		}
+
+		if v, ok := volumeMap[m.Name]; ok {
+			if _, ok := addedVolumeMap[m.Name]; !ok {
+				_ = addVolume(pod, v)
+				addedVolumeMap[m.Name] = v
+			}
+			_ = addVolumeMount(pod, m)
+		}
+	}
+	return nil
+}
+
+func addVolume(pod *corev1.Pod, volume corev1.Volume) error {
+	pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
+	return nil
+}
+
+func addVolumeMount(pod *corev1.Pod, mount corev1.VolumeMount) error {
+	i := findContainer(pod)
+	if i < 0 {
+		// Return early: indexing into Containers with a negative index would panic.
+		logger.Info("Skipped adding VolumeMount as the Spark container was not found in the pod", "mount", mount.Name, "pod", pod.Name)
+		return nil
+	}
+
+	pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, mount)
+	return nil
+}
+
+func addEnvVars(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	i := findContainer(pod)
+	if util.IsDriverPod(pod) {
+		if len(app.Spec.Driver.Env) == 0 {
+			return nil
+		} else if i < 0 {
+			return fmt.Errorf("failed to add envs as driver container not found")
+		}
+		pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, app.Spec.Driver.Env...)
+	} else if util.IsExecutorPod(pod) {
+		if len(app.Spec.Executor.Env) == 0 {
+			return nil
+		} else if i < 0 {
+			return fmt.Errorf("failed to add envs as executor container not found")
+		}
+		pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, app.Spec.Executor.Env...)
+	}
+	return nil
+}
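[Editorial note: each mutation step above shares the `mutateSparkPodOption` signature, which is what makes the pipeline in `mutateSparkPod` easy to extend. A sketch of a hypothetical extra option; the annotation key is made up and not part of this patch:]

```go
func addCustomAnnotation(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
	if pod.Annotations == nil {
		pod.Annotations = make(map[string]string)
	}
	pod.Annotations["example.com/spark-app-name"] = app.Name // hypothetical key
	return nil
}
```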
+
+func addEnvironmentVariable(pod *corev1.Pod, name, value string) error {
+	i := findContainer(pod)
+	if i < 0 {
+		return fmt.Errorf("not able to add environment variable as Spark container was not found")
+	}
+
+	pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, corev1.EnvVar{
+		Name:  name,
+		Value: value,
+	})
+	return nil
+}
+
+func addSparkConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	if app.Spec.SparkConfigMap == nil {
+		return nil
+	}
+
+	if err := addConfigMapVolume(pod, *app.Spec.SparkConfigMap, common.SparkConfigMapVolumeName); err != nil {
+		return err
+	}
+
+	if err := addConfigMapVolumeMount(pod, common.SparkConfigMapVolumeName, common.DefaultSparkConfDir); err != nil {
+		return err
+	}
+
+	if err := addEnvironmentVariable(pod, common.EnvSparkConfDir, common.DefaultSparkConfDir); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func addHadoopConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	if app.Spec.HadoopConfigMap == nil {
+		return nil
+	}
+
+	if err := addConfigMapVolume(pod, *app.Spec.HadoopConfigMap, common.HadoopConfigMapVolumeName); err != nil {
+		return err
+	}
+
+	if err := addConfigMapVolumeMount(pod, common.HadoopConfigMapVolumeName, common.DefaultHadoopConfDir); err != nil {
+		return err
+	}
+
+	if err := addEnvironmentVariable(pod, common.EnvHadoopConfDir, common.DefaultHadoopConfDir); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func addGeneralConfigMaps(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var configMaps []v1beta2.NamePath
+	if util.IsDriverPod(pod) {
+		configMaps = app.Spec.Driver.ConfigMaps
+	} else if util.IsExecutorPod(pod) {
+		configMaps = app.Spec.Executor.ConfigMaps
+	}
+
+	for _, namePath := range configMaps {
+		volumeName := namePath.Name + "-vol"
+		if len(volumeName) > maxNameLength {
+			volumeName = volumeName[0:maxNameLength]
+			logger.Info(fmt.Sprintf("ConfigMap volume name is too long. Truncating to length %d. Result: %s.", maxNameLength, volumeName))
+		}
+		if err := addConfigMapVolume(pod, namePath.Name, volumeName); err != nil {
+			return err
+		}
+		if err := addConfigMapVolumeMount(pod, volumeName, namePath.Path); err != nil {
+			return err
+		}
+	}
+	return nil
+}
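+
+// ConfigMaps referenced by the application spec are mounted read-only through
+// volumes named "<configmap-name>-vol"; the Prometheus ConfigMap handled below
+// follows the same naming convention.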
+
+func addPrometheusConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	// Skip if Prometheus monitoring is not enabled or an in-container ConfigFile is used,
+	// in which case a Prometheus ConfigMap won't be created.
+	if !util.PrometheusMonitoringEnabled(app) || (util.HasMetricsPropertiesFile(app) && util.HasPrometheusConfigFile(app)) {
+		return nil
+	}
+
+	if util.IsDriverPod(pod) && !util.ExposeDriverMetrics(app) {
+		return nil
+	}
+	if util.IsExecutorPod(pod) && !util.ExposeExecutorMetrics(app) {
+		return nil
+	}
+
+	name := util.GetPrometheusConfigMapName(app)
+	volumeName := name + "-vol"
+	mountPath := common.PrometheusConfigMapMountPath
+	promPort := common.DefaultPrometheusJavaAgentPort
+	if app.Spec.Monitoring.Prometheus.Port != nil {
+		promPort = *app.Spec.Monitoring.Prometheus.Port
+	}
+	promProtocol := common.DefaultPrometheusPortProtocol
+	promPortName := common.DefaultPrometheusPortName
+	if app.Spec.Monitoring.Prometheus.PortName != nil {
+		promPortName = *app.Spec.Monitoring.Prometheus.PortName
+	}
+	if err := addConfigMapVolume(pod, name, volumeName); err != nil {
+		return err
+	}
+	if err := addConfigMapVolumeMount(pod, volumeName, mountPath); err != nil {
+		return fmt.Errorf("could not mount volume %s in path %s: %v", volumeName, mountPath, err)
+	}
+	if err := addContainerPort(pod, promPort, promProtocol, promPortName); err != nil {
+		return fmt.Errorf("could not expose port %d to scrape metrics outside the pod: %v", promPort, err)
+	}
+	return nil
+}
+
+func addContainerPorts(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var ports []v1beta2.Port
+
+	if util.IsDriverPod(pod) {
+		ports = app.Spec.Driver.Ports
+	} else if util.IsExecutorPod(pod) {
+		ports = app.Spec.Executor.Ports
+	}
+
+	for _, p := range ports {
+		if err := addContainerPort(pod, p.ContainerPort, p.Protocol, p.Name); err != nil {
+			logger.Error(err, "Could not expose container port", "portName", p.Name, "pod", pod.Name)
+			continue
+		}
+	}
+	return nil
+}
+
+func addContainerPort(pod *corev1.Pod, port int32, protocol string, portName string) error {
+	i := findContainer(pod)
+	if i < 0 {
+		return fmt.Errorf("not able to add containerPort %d as Spark container was not found in pod", port)
+	}
+
+	containerPort := corev1.ContainerPort{
+		Name:          portName,
+		ContainerPort: port,
+		Protocol:      corev1.Protocol(protocol),
+	}
+	pod.Spec.Containers[i].Ports = append(pod.Spec.Containers[i].Ports, containerPort)
+	return nil
+}
+
+func addConfigMapVolume(pod *corev1.Pod, configMapName string, configMapVolumeName string) error {
+	volume := corev1.Volume{
+		Name: configMapVolumeName,
+		VolumeSource: corev1.VolumeSource{
+			ConfigMap: &corev1.ConfigMapVolumeSource{
+				LocalObjectReference: corev1.LocalObjectReference{
+					Name: configMapName,
+				},
+			},
+		},
+	}
+	return addVolume(pod, volume)
+}
+
+func addConfigMapVolumeMount(pod *corev1.Pod, configMapVolumeName string, mountPath string) error {
+	mount := corev1.VolumeMount{
+		Name:      configMapVolumeName,
+		ReadOnly:  true,
+		MountPath: mountPath,
+	}
+	return addVolumeMount(pod, mount)
+}
+
+func addAffinity(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var affinity *corev1.Affinity
+	if util.IsDriverPod(pod) {
+		affinity = app.Spec.Driver.Affinity
+	} else if util.IsExecutorPod(pod) {
+		affinity = app.Spec.Executor.Affinity
+	}
+	if affinity == nil {
+		return nil
+	}
+	pod.Spec.Affinity = affinity.DeepCopy()
+	return nil
+}
+
+func addTolerations(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var tolerations []corev1.Toleration
+	if util.IsDriverPod(pod) {
+		tolerations = app.Spec.Driver.SparkPodSpec.Tolerations
+	} else if util.IsExecutorPod(pod) {
+		tolerations = app.Spec.Executor.SparkPodSpec.Tolerations
+	}
+
+	if pod.Spec.Tolerations == nil {
+		pod.Spec.Tolerations = []corev1.Toleration{}
+	}
+
+	pod.Spec.Tolerations = append(pod.Spec.Tolerations, tolerations...)
+	return nil
+}
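+
+// Node selectors from the application spec are merged into any selectors the
+// pod template already carries; on a key conflict the application spec wins.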
+
+func addNodeSelectors(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var nodeSelector map[string]string
+	if util.IsDriverPod(pod) {
+		nodeSelector = app.Spec.Driver.NodeSelector
+	} else if util.IsExecutorPod(pod) {
+		nodeSelector = app.Spec.Executor.NodeSelector
+	}
+
+	if pod.Spec.NodeSelector == nil {
+		pod.Spec.NodeSelector = make(map[string]string)
+	}
+
+	for k, v := range nodeSelector {
+		pod.Spec.NodeSelector[k] = v
+	}
+	return nil
+}
+
+func addDNSConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var dnsConfig *corev1.PodDNSConfig
+	if util.IsDriverPod(pod) {
+		dnsConfig = app.Spec.Driver.DNSConfig
+	} else if util.IsExecutorPod(pod) {
+		dnsConfig = app.Spec.Executor.DNSConfig
+	}
+
+	if dnsConfig != nil {
+		pod.Spec.DNSConfig = dnsConfig
+	}
+	return nil
+}
+
+func addSchedulerName(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var schedulerName *string
+	// NOTE: Prefer `BatchScheduler` when the application spec has it configured.
+	if app.Spec.BatchScheduler != nil {
+		schedulerName = app.Spec.BatchScheduler
+	} else if util.IsDriverPod(pod) {
+		schedulerName = app.Spec.Driver.SchedulerName
+	} else if util.IsExecutorPod(pod) {
+		schedulerName = app.Spec.Executor.SchedulerName
+	}
+
+	if schedulerName == nil || *schedulerName == "" {
+		return nil
+	}
+
+	pod.Spec.SchedulerName = *schedulerName
+	return nil
+}
+
+func addPriorityClassName(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var priorityClassName *string
+	if app.Spec.BatchSchedulerOptions != nil {
+		priorityClassName = app.Spec.BatchSchedulerOptions.PriorityClassName
+	}
+
+	if priorityClassName != nil && *priorityClassName != "" {
+		pod.Spec.PriorityClassName = *priorityClassName
+		if pod.Spec.Priority != nil {
+			pod.Spec.Priority = nil
+		}
+		if pod.Spec.PreemptionPolicy != nil {
+			pod.Spec.PreemptionPolicy = nil
+		}
+	}
+	return nil
+}
+
+func addPodSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var securityContext *corev1.PodSecurityContext
+	if util.IsDriverPod(pod) {
+		securityContext = app.Spec.Driver.PodSecurityContext
+	} else if util.IsExecutorPod(pod) {
+		securityContext = app.Spec.Executor.PodSecurityContext
+	}
+
+	if securityContext != nil {
+		pod.Spec.SecurityContext = securityContext
+	}
+	return nil
+}
+
+func addContainerSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	i := findContainer(pod)
+	if util.IsDriverPod(pod) {
+		if i < 0 {
+			return fmt.Errorf("driver container not found in pod")
+		}
+		if app.Spec.Driver.SecurityContext == nil {
+			return nil
+		}
+		pod.Spec.Containers[i].SecurityContext = app.Spec.Driver.SecurityContext
+	} else if util.IsExecutorPod(pod) {
+		if i < 0 {
+			return fmt.Errorf("executor container not found in pod")
+		}
+		if app.Spec.Executor.SecurityContext == nil {
+			return nil
+		}
+		pod.Spec.Containers[i].SecurityContext = app.Spec.Executor.SecurityContext
+	}
+	return nil
+}
+
+func addSidecarContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var sidecars []corev1.Container
+	if util.IsDriverPod(pod) {
+		sidecars = app.Spec.Driver.Sidecars
+	} else if util.IsExecutorPod(pod) {
+		sidecars = app.Spec.Executor.Sidecars
+	}
+
+	for _, sidecar := range sidecars {
+		if !hasContainer(pod, &sidecar) {
+			pod.Spec.Containers = append(pod.Spec.Containers, *sidecar.DeepCopy())
+		}
+	}
+	return nil
+}
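+
+// Sidecar and init containers are deduplicated by container name and image
+// before being appended, so containers already present in the pod template are
+// never added twice.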
+
+func addInitContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var initContainers []corev1.Container
+	if util.IsDriverPod(pod) {
+		initContainers = app.Spec.Driver.InitContainers
+	} else if util.IsExecutorPod(pod) {
+		initContainers = app.Spec.Executor.InitContainers
+	}
+
+	if pod.Spec.InitContainers == nil {
+		pod.Spec.InitContainers = []corev1.Container{}
+	}
+
+	for _, container := range initContainers {
+		if !hasInitContainer(pod, &container) {
+			pod.Spec.InitContainers = append(pod.Spec.InitContainers, *container.DeepCopy())
+		}
+	}
+	return nil
+}
+
+func addGPU(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var gpu *v1beta2.GPUSpec
+	if util.IsDriverPod(pod) {
+		gpu = app.Spec.Driver.GPU
+	}
+	if util.IsExecutorPod(pod) {
+		gpu = app.Spec.Executor.GPU
+	}
+	if gpu == nil {
+		return nil
+	}
+	if gpu.Name == "" {
+		logger.V(1).Info(fmt.Sprintf("Please specify GPU resource name, such as: nvidia.com/gpu, amd.com/gpu etc. Current gpu spec: %+v", gpu))
+		return nil
+	}
+	if gpu.Quantity <= 0 {
+		logger.V(1).Info(fmt.Sprintf("GPU quantity must be positive. Current gpu spec: %+v", gpu))
+		return nil
+	}
+
+	i := findContainer(pod)
+	if i < 0 {
+		return fmt.Errorf("not able to add GPU as Spark container was not found in pod %s", pod.Name)
+	}
+	if pod.Spec.Containers[i].Resources.Limits == nil {
+		pod.Spec.Containers[i].Resources.Limits = make(corev1.ResourceList)
+	}
+	pod.Spec.Containers[i].Resources.Limits[corev1.ResourceName(gpu.Name)] = *resource.NewQuantity(gpu.Quantity, resource.DecimalSI)
+	return nil
+}
+
+func addHostNetwork(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var hostNetwork *bool
+	if util.IsDriverPod(pod) {
+		hostNetwork = app.Spec.Driver.HostNetwork
+	}
+	if util.IsExecutorPod(pod) {
+		hostNetwork = app.Spec.Executor.HostNetwork
+	}
+
+	if hostNetwork == nil || !*hostNetwork {
+		return nil
+	}
+
+	// For pods with hostNetwork, explicitly set the DNS policy to "ClusterFirstWithHostNet".
+	// Detail: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+	pod.Spec.HostNetwork = true
+	pod.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet
+	return nil
+}
+
+func addTerminationGracePeriodSeconds(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var gracePeriodSeconds *int64
+	if util.IsDriverPod(pod) {
+		gracePeriodSeconds = app.Spec.Driver.TerminationGracePeriodSeconds
+	} else if util.IsExecutorPod(pod) {
+		gracePeriodSeconds = app.Spec.Executor.TerminationGracePeriodSeconds
+	}
+
+	if gracePeriodSeconds == nil {
+		return nil
+	}
+
+	pod.Spec.TerminationGracePeriodSeconds = gracePeriodSeconds
+	return nil
+}
+
+func addPodLifeCycleConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var lifeCycle *corev1.Lifecycle
+	var containerName string
+	if util.IsDriverPod(pod) {
+		lifeCycle = app.Spec.Driver.Lifecycle
+		containerName = common.SparkDriverContainerName
+	} else if util.IsExecutorPod(pod) {
+		lifeCycle = app.Spec.Executor.Lifecycle
+		containerName = common.SparkExecutorContainerName
+	}
+	if lifeCycle == nil {
+		return nil
+	}
+
+	i := 0
+	// Find the Spark container by name in the pod.
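+	// Unlike findContainer, this lookup matches only the single role-specific
+	// container name and does not include the Spark 3.x executor-name fallback.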
+	for ; i < len(pod.Spec.Containers); i++ {
+		if pod.Spec.Containers[i].Name == containerName {
+			break
+		}
+	}
+	if i == len(pod.Spec.Containers) {
+		logger.Info("Spark container not found in pod", "containerName", containerName, "pod", pod.Name)
+		return nil
+	}
+
+	pod.Spec.Containers[i].Lifecycle = lifeCycle
+	return nil
+}
+
+func addHostAliases(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var hostAliases []corev1.HostAlias
+	if util.IsDriverPod(pod) {
+		hostAliases = app.Spec.Driver.HostAliases
+	} else if util.IsExecutorPod(pod) {
+		hostAliases = app.Spec.Executor.HostAliases
+	}
+
+	pod.Spec.HostAliases = append(pod.Spec.HostAliases, hostAliases...)
+	return nil
+}
+
+func addShareProcessNamespace(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var shareProcessNamespace *bool
+	if util.IsDriverPod(pod) {
+		shareProcessNamespace = app.Spec.Driver.ShareProcessNamespace
+	} else if util.IsExecutorPod(pod) {
+		shareProcessNamespace = app.Spec.Executor.ShareProcessNamespace
+	}
+
+	if shareProcessNamespace == nil || !*shareProcessNamespace {
+		return nil
+	}
+
+	pod.Spec.ShareProcessNamespace = shareProcessNamespace
+	return nil
+}
+
+func findContainer(pod *corev1.Pod) int {
+	var candidateContainerNames []string
+	if util.IsDriverPod(pod) {
+		candidateContainerNames = append(candidateContainerNames, common.SparkDriverContainerName)
+	} else if util.IsExecutorPod(pod) {
+		// Spark 3.x changed the default executor container name, so we need to include both.
+		candidateContainerNames = append(candidateContainerNames, common.SparkExecutorContainerName, common.Spark3DefaultExecutorContainerName)
+	}
+
+	if len(candidateContainerNames) == 0 {
+		return -1
+	}
+
+	for i := 0; i < len(pod.Spec.Containers); i++ {
+		for _, name := range candidateContainerNames {
+			if pod.Spec.Containers[i].Name == name {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+func hasContainer(pod *corev1.Pod, container *corev1.Container) bool {
+	for _, c := range pod.Spec.Containers {
+		if container.Name == c.Name && container.Image == c.Image {
+			return true
+		}
+	}
+	return false
+}
+
+func hasInitContainer(pod *corev1.Pod, container *corev1.Container) bool {
+	for _, c := range pod.Spec.InitContainers {
+		if container.Name == c.Name && container.Image == c.Image {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/webhook/patch_test.go b/internal/webhook/sparkpod_defaulter_test.go
similarity index 76%
rename from pkg/webhook/patch_test.go
rename to internal/webhook/sparkpod_defaulter_test.go
index 99f821f37..f81eac24b 100644
--- a/pkg/webhook/patch_test.go
+++ b/internal/webhook/sparkpod_defaulter_test.go
@@ -17,19 +17,16 @@ limitations under the License.
package webhook import ( - "encoding/json" "fmt" "testing" - jsonpatch "github.com/evanphx/json-patch" "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" ) func TestPatchSparkPod_OwnerReference(t *testing.T) { @@ -44,14 +41,14 @@ func TestPatchSparkPod_OwnerReference(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -63,7 +60,7 @@ func TestPatchSparkPod_OwnerReference(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 1, len(modifiedPod.OwnerReferences)) + assert.Len(t, modifiedPod.OwnerReferences, 1) // Test patching a pod with existing OwnerReference and Volume. pod.OwnerReferences = append(pod.OwnerReferences, metav1.OwnerReference{Name: "owner-reference1"}) @@ -72,7 +69,7 @@ func TestPatchSparkPod_OwnerReference(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedPod.OwnerReferences)) + assert.Len(t, modifiedPod.OwnerReferences, 2) } func TestPatchSparkPod_Local_Volumes(t *testing.T) { @@ -121,14 +118,14 @@ func TestPatchSparkPod_Local_Volumes(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -141,7 +138,7 @@ func TestPatchSparkPod_Local_Volumes(t *testing.T) { } // local volume will not be added by webhook - assert.Equal(t, 0, len(modifiedPod.Spec.Volumes)) + assert.Empty(t, modifiedPod.Spec.Volumes) } func TestPatchSparkPod_Volumes_Subpath(t *testing.T) { @@ -184,14 +181,14 @@ func TestPatchSparkPod_Volumes_Subpath(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -204,9 +201,9 @@ func TestPatchSparkPod_Volumes_Subpath(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 1, len(modifiedPod.Spec.Volumes)) + assert.Len(t, modifiedPod.Spec.Volumes, 1) assert.Equal(t, app.Spec.Volumes[0], modifiedPod.Spec.Volumes[0]) - assert.Equal(t, 2, len(modifiedPod.Spec.Containers[0].VolumeMounts)) + assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 2) assert.Equal(t, app.Spec.Driver.VolumeMounts[0], modifiedPod.Spec.Containers[0].VolumeMounts[0]) assert.Equal(t, 
app.Spec.Driver.VolumeMounts[1], modifiedPod.Spec.Containers[0].VolumeMounts[1]) } @@ -255,14 +252,14 @@ func TestPatchSparkPod_Volumes(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -275,10 +272,10 @@ func TestPatchSparkPod_Volumes(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedPod.Spec.Volumes)) + assert.Len(t, modifiedPod.Spec.Volumes, 2) assert.Equal(t, app.Spec.Volumes[0], modifiedPod.Spec.Volumes[0]) assert.Equal(t, app.Spec.Volumes[1], modifiedPod.Spec.Volumes[1]) - assert.Equal(t, 2, len(modifiedPod.Spec.Containers[0].VolumeMounts)) + assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 2) assert.Equal(t, app.Spec.Driver.VolumeMounts[0], modifiedPod.Spec.Containers[0].VolumeMounts[0]) assert.Equal(t, app.Spec.Driver.VolumeMounts[1], modifiedPod.Spec.Containers[0].VolumeMounts[1]) @@ -293,10 +290,10 @@ func TestPatchSparkPod_Volumes(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 3, len(modifiedPod.Spec.Volumes)) + assert.Len(t, modifiedPod.Spec.Volumes, 3) assert.Equal(t, app.Spec.Volumes[0], modifiedPod.Spec.Volumes[1]) assert.Equal(t, app.Spec.Volumes[1], modifiedPod.Spec.Volumes[2]) - assert.Equal(t, 3, len(modifiedPod.Spec.Containers[0].VolumeMounts)) + assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 3) assert.Equal(t, app.Spec.Driver.VolumeMounts[0], modifiedPod.Spec.Containers[0].VolumeMounts[1]) assert.Equal(t, app.Spec.Driver.VolumeMounts[1], modifiedPod.Spec.Containers[0].VolumeMounts[2]) } @@ -315,7 +312,7 @@ func TestPatchSparkPod_Affinity(t *testing.T) { RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ { LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{config.SparkRoleLabel: config.SparkDriverRole}, + MatchLabels: map[string]string{common.LabelSparkRole: common.SparkRoleDriver}, }, TopologyKey: "kubernetes.io/hostname", }, @@ -331,14 +328,14 @@ func TestPatchSparkPod_Affinity(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -351,9 +348,8 @@ func TestPatchSparkPod_Affinity(t *testing.T) { t.Fatal(err) } - assert.True(t, modifiedPod.Spec.Affinity != nil) - assert.Equal(t, 1, - len(modifiedPod.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution)) + assert.NotNil(t, modifiedPod.Spec.Affinity) + assert.Len(t, modifiedPod.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 1) assert.Equal(t, "kubernetes.io/hostname", modifiedPod.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].TopologyKey) } @@ -380,14 +376,14 @@ func TestPatchSparkPod_ConfigMaps(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - 
config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -399,12 +395,12 @@ func TestPatchSparkPod_ConfigMaps(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedPod.Spec.Volumes)) + assert.Len(t, modifiedPod.Spec.Volumes, 2) assert.Equal(t, "foo-vol", modifiedPod.Spec.Volumes[0].Name) - assert.True(t, modifiedPod.Spec.Volumes[0].ConfigMap != nil) + assert.NotNil(t, modifiedPod.Spec.Volumes[0].ConfigMap) assert.Equal(t, "bar-vol", modifiedPod.Spec.Volumes[1].Name) - assert.True(t, modifiedPod.Spec.Volumes[1].ConfigMap != nil) - assert.Equal(t, 2, len(modifiedPod.Spec.Containers[0].VolumeMounts)) + assert.NotNil(t, modifiedPod.Spec.Volumes[1].ConfigMap) + assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 2) assert.Equal(t, "/path/to/foo", modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath) assert.Equal(t, "/path/to/bar", modifiedPod.Spec.Containers[0].VolumeMounts[1].MountPath) } @@ -425,14 +421,14 @@ func TestPatchSparkPod_SparkConfigMap(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -444,13 +440,13 @@ func TestPatchSparkPod_SparkConfigMap(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 1, len(modifiedPod.Spec.Volumes)) - assert.Equal(t, config.SparkConfigMapVolumeName, modifiedPod.Spec.Volumes[0].Name) - assert.True(t, modifiedPod.Spec.Volumes[0].ConfigMap != nil) - assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].VolumeMounts)) - assert.Equal(t, config.DefaultSparkConfDir, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath) - assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].Env)) - assert.Equal(t, config.DefaultSparkConfDir, modifiedPod.Spec.Containers[0].Env[0].Value) + assert.Len(t, modifiedPod.Spec.Volumes, 1) + assert.Equal(t, common.SparkConfigMapVolumeName, modifiedPod.Spec.Volumes[0].Name) + assert.NotNil(t, modifiedPod.Spec.Volumes[0].ConfigMap) + assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 1) + assert.Equal(t, common.DefaultSparkConfDir, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath) + assert.Len(t, modifiedPod.Spec.Containers[0].Env, 1) + assert.Equal(t, common.DefaultSparkConfDir, modifiedPod.Spec.Containers[0].Env[0].Value) } func TestPatchSparkPod_HadoopConfigMap(t *testing.T) { @@ -469,14 +465,14 @@ func TestPatchSparkPod_HadoopConfigMap(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -488,75 +484,75 @@ func TestPatchSparkPod_HadoopConfigMap(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 1, 
len(modifiedPod.Spec.Volumes)) - assert.Equal(t, config.HadoopConfigMapVolumeName, modifiedPod.Spec.Volumes[0].Name) - assert.True(t, modifiedPod.Spec.Volumes[0].ConfigMap != nil) - assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].VolumeMounts)) - assert.Equal(t, config.DefaultHadoopConfDir, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath) - assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].Env)) - assert.Equal(t, config.DefaultHadoopConfDir, modifiedPod.Spec.Containers[0].Env[0].Value) + assert.Len(t, modifiedPod.Spec.Volumes, 1) + assert.Equal(t, common.HadoopConfigMapVolumeName, modifiedPod.Spec.Volumes[0].Name) + assert.NotNil(t, modifiedPod.Spec.Volumes[0].ConfigMap) + assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 1) + assert.Equal(t, common.DefaultHadoopConfDir, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath) + assert.Len(t, modifiedPod.Spec.Containers[0].Env, 1) + assert.Equal(t, common.DefaultHadoopConfDir, modifiedPod.Spec.Containers[0].Env[0].Value) } -func TestPatchSparkPod_PrometheusConfigMaps(t *testing.T) { - var appPort int32 = 9999 - appPortName := "jmx-exporter" - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Monitoring: &v1beta2.MonitoringSpec{ - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "", - Port: &appPort, - PortName: &appPortName, - ConfigFile: nil, - Configuration: nil, - }, - ExposeDriverMetrics: true, - }, - }, - } - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-driver", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: config.SparkDriverContainerName, - Image: "spark-driver:latest", - }, - }, - }, - } - - modifiedPod, err := getModifiedPod(pod, app) - if err != nil { - t.Fatal(err) - } - - expectedConfigMapName := config.GetPrometheusConfigMapName(app) - expectedVolumeName := expectedConfigMapName + "-vol" - expectedContainerPort := *app.Spec.Monitoring.Prometheus.Port - expectedContainerPortName := *app.Spec.Monitoring.Prometheus.PortName - assert.Equal(t, 1, len(modifiedPod.Spec.Volumes)) - assert.Equal(t, expectedVolumeName, modifiedPod.Spec.Volumes[0].Name) - assert.True(t, modifiedPod.Spec.Volumes[0].ConfigMap != nil) - assert.Equal(t, expectedConfigMapName, modifiedPod.Spec.Volumes[0].ConfigMap.Name) - assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].VolumeMounts)) - assert.Equal(t, expectedVolumeName, modifiedPod.Spec.Containers[0].VolumeMounts[0].Name) - assert.Equal(t, config.PrometheusConfigMapMountPath, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath) - assert.Equal(t, expectedContainerPort, modifiedPod.Spec.Containers[0].Ports[0].ContainerPort) - assert.Equal(t, expectedContainerPortName, modifiedPod.Spec.Containers[0].Ports[0].Name) - assert.Equal(t, corev1.Protocol(config.DefaultPrometheusPortProtocol), modifiedPod.Spec.Containers[0].Ports[0].Protocol) -} +// func TestPatchSparkPod_PrometheusConfigMaps(t *testing.T) { +// var appPort int32 = 9999 +// appPortName := "jmx-exporter" +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Monitoring: &v1beta2.MonitoringSpec{ +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "", +// Port: &appPort, +// PortName: &appPortName, 
+// ConfigFile: nil, +// Configuration: nil, +// }, +// ExposeDriverMetrics: true, +// }, +// }, +// } + +// pod := &corev1.Pod{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-driver", +// Labels: map[string]string{ +// common.LabelSparkRole: common.SparkRoleDriver, +// common.LabelLaunchedBySparkOperator: "true", +// }, +// }, +// Spec: corev1.PodSpec{ +// Containers: []corev1.Container{ +// { +// Name: common.SparkDriverContainerName, +// Image: "spark-driver:latest", +// }, +// }, +// }, +// } + +// modifiedPod, err := getModifiedPod(pod, app) +// if err != nil { +// t.Fatal(err) +// } + +// expectedConfigMapName := GetPrometheusConfigMapName(app) +// expectedVolumeName := expectedConfigMapName + "-vol" +// expectedContainerPort := *app.Spec.Monitoring.Prometheus.Port +// expectedContainerPortName := *app.Spec.Monitoring.Prometheus.PortName +// assert.Len(t, modifiedPod.Spec.Volumes, 1) +// assert.Equal(t, expectedVolumeName, modifiedPod.Spec.Volumes[0].Name) +// assert.NotNil(t, modifiedPod.Spec.Volumes[0].ConfigMap) +// assert.Equal(t, expectedConfigMapName, modifiedPod.Spec.Volumes[0].ConfigMap.Name) +// assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 1) +// assert.Equal(t, expectedVolumeName, modifiedPod.Spec.Containers[0].VolumeMounts[0].Name) +// assert.Equal(t, common.PrometheusConfigMapMountPath, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath) +// assert.Equal(t, expectedContainerPort, modifiedPod.Spec.Containers[0].Ports[0].ContainerPort) +// assert.Equal(t, expectedContainerPortName, modifiedPod.Spec.Containers[0].Ports[0].Name) +// assert.Equal(t, corev1.Protocol(common.DefaultPrometheusPortProtocol), modifiedPod.Spec.Containers[0].Ports[0].Protocol) +// } func TestPatchSparkPod_Tolerations(t *testing.T) { app := &v1beta2.SparkApplication{ @@ -591,14 +587,14 @@ func TestPatchSparkPod_Tolerations(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -610,7 +606,7 @@ func TestPatchSparkPod_Tolerations(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedPod.Spec.Tolerations)) + assert.Len(t, modifiedPod.Spec.Tolerations, 2) assert.Equal(t, app.Spec.Driver.Tolerations[0], modifiedPod.Spec.Tolerations[0]) assert.Equal(t, app.Spec.Driver.Tolerations[1], modifiedPod.Spec.Tolerations[1]) } @@ -655,14 +651,14 @@ func TestPatchSparkPod_SecurityContext(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -680,14 +676,14 @@ func TestPatchSparkPod_SecurityContext(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + 
common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -727,15 +723,15 @@ func TestPatchSparkPod_SchedulerName(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ SchedulerName: defaultScheduler, Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -753,15 +749,15 @@ func TestPatchSparkPod_SchedulerName(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ SchedulerName: defaultScheduler, Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -801,14 +797,14 @@ func TestPatchSparkPod_PriorityClassName(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -822,20 +818,20 @@ func TestPatchSparkPod_PriorityClassName(t *testing.T) { //Driver priorityClassName should be populated when specified assert.Equal(t, priorityClassName, modifiedDriverPod.Spec.PriorityClassName) - var defaultPriority int32 = 0 + var defaultPriority int32 var defaultPolicy corev1.PreemptionPolicy = corev1.PreemptLowerPriority executorPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -896,14 +892,14 @@ func TestPatchSparkPod_Sidecars(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -914,7 +910,7 @@ func TestPatchSparkPod_Sidecars(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 3, len(modifiedDriverPod.Spec.Containers)) + assert.Len(t, modifiedDriverPod.Spec.Containers, 3) assert.Equal(t, "sidecar1", modifiedDriverPod.Spec.Containers[1].Name) 
assert.Equal(t, "sidecar2", modifiedDriverPod.Spec.Containers[2].Name) @@ -922,14 +918,14 @@ func TestPatchSparkPod_Sidecars(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -940,7 +936,7 @@ func TestPatchSparkPod_Sidecars(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 3, len(modifiedExecutorPod.Spec.Containers)) + assert.Len(t, modifiedExecutorPod.Spec.Containers, 3) assert.Equal(t, "sidecar1", modifiedExecutorPod.Spec.Containers[1].Name) assert.Equal(t, "sidecar2", modifiedExecutorPod.Spec.Containers[2].Name) } @@ -987,14 +983,14 @@ func TestPatchSparkPod_InitContainers(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1005,7 +1001,7 @@ func TestPatchSparkPod_InitContainers(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedDriverPod.Spec.InitContainers)) + assert.Len(t, modifiedDriverPod.Spec.InitContainers, 2) assert.Equal(t, "init-container1", modifiedDriverPod.Spec.InitContainers[0].Name) assert.Equal(t, "init-container2", modifiedDriverPod.Spec.InitContainers[1].Name) @@ -1013,14 +1009,14 @@ func TestPatchSparkPod_InitContainers(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1031,7 +1027,7 @@ func TestPatchSparkPod_InitContainers(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedExecutorPod.Spec.InitContainers)) + assert.Len(t, modifiedExecutorPod.Spec.InitContainers, 2) assert.Equal(t, "init-container1", modifiedExecutorPod.Spec.InitContainers[0].Name) assert.Equal(t, "init-container2", modifiedExecutorPod.Spec.InitContainers[1].Name) } @@ -1065,14 +1061,14 @@ func TestPatchSparkPod_DNSConfig(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1090,14 +1086,14 @@ func TestPatchSparkPod_DNSConfig(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: 
config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1138,14 +1134,14 @@ func TestPatchSparkPod_NodeSector(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1156,7 +1152,7 @@ func TestPatchSparkPod_NodeSector(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedDriverPod.Spec.NodeSelector)) + assert.Len(t, modifiedDriverPod.Spec.NodeSelector, 2) assert.Equal(t, "ssd", modifiedDriverPod.Spec.NodeSelector["disk"]) assert.Equal(t, "secondvalue", modifiedDriverPod.Spec.NodeSelector["secondkey"]) @@ -1164,14 +1160,14 @@ func TestPatchSparkPod_NodeSector(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1182,7 +1178,7 @@ func TestPatchSparkPod_NodeSector(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedExecutorPod.Spec.NodeSelector)) + assert.Len(t, modifiedExecutorPod.Spec.NodeSelector, 2) assert.Equal(t, "gpu", modifiedExecutorPod.Spec.NodeSelector["nodeType"]) assert.Equal(t, "secondvalue", modifiedExecutorPod.Spec.NodeSelector["secondkey"]) } @@ -1328,14 +1324,14 @@ func TestPatchSparkPod_GPU(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1355,14 +1351,14 @@ func TestPatchSparkPod_GPU(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1411,14 +1407,14 @@ func TestPatchSparkPod_HostNetwork(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + 
common.LabelLaunchedBySparkOperator: "true",
 				},
 			},
 			Spec: corev1.PodSpec{
 				Containers: []corev1.Container{
 					{
-						Name:  config.SparkDriverContainerName,
+						Name:  common.SparkDriverContainerName,
 						Image: "spark-driver:latest",
 					},
 				},
@@ -1430,23 +1426,23 @@
 			t.Fatal(err)
 		}
 		if test == nil || *test == false {
-			assert.Equal(t, false, modifiedDriverPod.Spec.HostNetwork)
+			assert.False(t, modifiedDriverPod.Spec.HostNetwork)
 		} else {
-			assert.Equal(t, true, modifiedDriverPod.Spec.HostNetwork)
+			assert.True(t, modifiedDriverPod.Spec.HostNetwork)
 			assert.Equal(t, corev1.DNSClusterFirstWithHostNet, modifiedDriverPod.Spec.DNSPolicy)
 		}
 		executorPod := &corev1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "spark-executor",
 				Labels: map[string]string{
-					config.SparkRoleLabel:               config.SparkExecutorRole,
-					config.LaunchedBySparkOperatorLabel: "true",
+					common.LabelSparkRole:               common.SparkRoleExecutor,
+					common.LabelLaunchedBySparkOperator: "true",
 				},
 			},
 			Spec: corev1.PodSpec{
 				Containers: []corev1.Container{
 					{
-						Name:  config.SparkExecutorContainerName,
+						Name:  common.SparkExecutorContainerName,
 						Image: "spark-executor:latest",
 					},
 				},
@@ -1458,9 +1454,9 @@
 			t.Fatal(err)
 		}
 		if test == nil || *test == false {
-			assert.Equal(t, false, modifiedExecutorPod.Spec.HostNetwork)
+			assert.False(t, modifiedExecutorPod.Spec.HostNetwork)
 		} else {
-			assert.Equal(t, true, modifiedExecutorPod.Spec.HostNetwork)
+			assert.True(t, modifiedExecutorPod.Spec.HostNetwork)
 			assert.Equal(t, corev1.DNSClusterFirstWithHostNet, modifiedExecutorPod.Spec.DNSPolicy)
 		}
 	}
@@ -1505,14 +1501,14 @@ func TestPatchSparkPod_Env(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -1523,14 +1519,14 @@
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -1542,20 +1538,20 @@
 		t.Fatal(err)
 	}
-	assert.Equal(t, 1, len(modifiedExecutorPod.Spec.Containers[0].Env))
+	assert.Len(t, modifiedExecutorPod.Spec.Containers[0].Env, 1)
 	assert.Equal(t, exeEnvKey, modifiedExecutorPod.Spec.Containers[0].Env[0].Name)
 	assert.Equal(t, exeEnvVal, modifiedExecutorPod.Spec.Containers[0].Env[0].Value)
-	assert.True(t, modifiedExecutorPod.Spec.Containers[0].Env[0].ValueFrom == nil)
+	assert.Nil(t, modifiedExecutorPod.Spec.Containers[0].Env[0].ValueFrom)
 	modifiedDriverPod, err := getModifiedPod(driverPod, app)
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 1, len(modifiedDriverPod.Spec.Containers[0].Env))
+	assert.Len(t, modifiedDriverPod.Spec.Containers[0].Env, 1)
 	assert.Equal(t, drvEnvKey, modifiedDriverPod.Spec.Containers[0].Env[0].Name)
 	assert.Equal(t, drvEnvVal,
modifiedDriverPod.Spec.Containers[0].Env[0].Value) - assert.True(t, modifiedDriverPod.Spec.Containers[0].Env[0].ValueFrom == nil) + assert.Nil(t, modifiedDriverPod.Spec.Containers[0].Env[0].ValueFrom) } func TestPatchSparkPod_EnvFrom(t *testing.T) { @@ -1615,14 +1611,14 @@ func TestPatchSparkPod_EnvFrom(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1633,14 +1629,14 @@ func TestPatchSparkPod_EnvFrom(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-driver:latest", }, }, @@ -1651,7 +1647,7 @@ func TestPatchSparkPod_EnvFrom(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedDriverPod.Spec.Containers[0].EnvFrom)) + assert.Len(t, modifiedDriverPod.Spec.Containers[0].EnvFrom, 2) assert.Equal(t, configMapName, modifiedDriverPod.Spec.Containers[0].EnvFrom[0].ConfigMapRef.Name) assert.Equal(t, secretName, modifiedDriverPod.Spec.Containers[0].EnvFrom[1].SecretRef.Name) @@ -1659,7 +1655,7 @@ func TestPatchSparkPod_EnvFrom(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedExecutorPod.Spec.Containers[0].EnvFrom)) + assert.Len(t, modifiedExecutorPod.Spec.Containers[0].EnvFrom, 2) assert.Equal(t, configMapName, modifiedExecutorPod.Spec.Containers[0].EnvFrom[0].ConfigMapRef.Name) assert.Equal(t, secretName, modifiedExecutorPod.Spec.Containers[0].EnvFrom[1].SecretRef.Name) } @@ -1694,14 +1690,14 @@ func TestPatchSparkPod_GracePeriodSeconds(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1713,7 +1709,7 @@ func TestPatchSparkPod_GracePeriodSeconds(t *testing.T) { t.Fatal(err) } if test == nil { - assert.True(t, modifiedDriverPod.Spec.TerminationGracePeriodSeconds == nil) + assert.Nil(t, modifiedDriverPod.Spec.TerminationGracePeriodSeconds) } else { assert.Equal(t, int64(60), *modifiedDriverPod.Spec.TerminationGracePeriodSeconds) } @@ -1722,14 +1718,14 @@ func TestPatchSparkPod_GracePeriodSeconds(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: 
common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -1741,7 +1737,7 @@ func TestPatchSparkPod_GracePeriodSeconds(t *testing.T) {
 			t.Fatal(err)
 		}
 		if test == nil {
-			assert.True(t, modifiedDriverPod.Spec.TerminationGracePeriodSeconds == nil)
+			assert.Nil(t, modifiedExecPod.Spec.TerminationGracePeriodSeconds)
 		} else {
 			assert.Equal(t, int64(60), *modifiedExecPod.Spec.TerminationGracePeriodSeconds)
 		}
@@ -1778,14 +1774,14 @@ func TestPatchSparkPod_Lifecycle(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -1796,14 +1792,14 @@
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -1822,31 +1818,12 @@
 	assert.Equal(t, postStartTest, modifiedExecutorPod.Spec.Containers[0].Lifecycle.PostStart.Exec)
 }
-func getModifiedPod(pod *corev1.Pod, app *v1beta2.SparkApplication) (*corev1.Pod, error) {
-	patchOps := patchSparkPod(pod.DeepCopy(), app)
-	patchBytes, err := json.Marshal(patchOps)
-	if err != nil {
-		return nil, err
-	}
-	patch, err := jsonpatch.DecodePatch(patchBytes)
-	if err != nil {
-		return nil, err
-	}
-
-	original, err := json.Marshal(pod)
-	if err != nil {
-		return nil, err
-	}
-	modified, err := patch.Apply(original)
-	if err != nil {
-		return nil, err
-	}
-	modifiedPod := &corev1.Pod{}
-	if err := json.Unmarshal(modified, modifiedPod); err != nil {
+func getModifiedPod(old *corev1.Pod, app *v1beta2.SparkApplication) (*corev1.Pod, error) {
+	newPod := old.DeepCopy()
+	if err := mutateSparkPod(newPod, app); err != nil {
 		return nil, err
 	}
-
-	return modifiedPod, nil
+	return newPod, nil
 }
 
 func TestPatchSparkPod_HostAliases(t *testing.T) {
@@ -1899,14 +1876,14 @@
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -1917,7 +1894,7 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 4, len(modifiedDriverPod.Spec.HostAliases))
+	assert.Len(t, modifiedDriverPod.Spec.HostAliases, 4)
 	assert.Equal(t, "127.0.0.1", modifiedDriverPod.Spec.HostAliases[0].IP)
 	assert.Equal(t, "192.168.0.1", modifiedDriverPod.Spec.HostAliases[1].IP)
 	assert.Equal(t, "192.168.0.2", modifiedDriverPod.Spec.HostAliases[2].IP)
@@ -1927,14 +1904,14 @@ func TestPatchSparkPod_HostAliases(t
*testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1945,7 +1922,7 @@ func TestPatchSparkPod_HostAliases(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedExecutorPod.Spec.HostAliases)) + assert.Len(t, modifiedExecutorPod.Spec.HostAliases, 2) assert.Equal(t, "127.0.0.1", modifiedExecutorPod.Spec.HostAliases[0].IP) assert.Equal(t, "192.168.0.1", modifiedExecutorPod.Spec.HostAliases[1].IP) } @@ -1976,14 +1953,14 @@ func TestPatchSparkPod_Ports(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1995,7 +1972,7 @@ func TestPatchSparkPod_Ports(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedDriverPod.Spec.Containers[0].Ports)) + assert.Len(t, modifiedDriverPod.Spec.Containers[0].Ports, 2) assert.Equal(t, "driverPort1", modifiedDriverPod.Spec.Containers[0].Ports[0].Name) assert.Equal(t, "driverPort2", modifiedDriverPod.Spec.Containers[0].Ports[1].Name) assert.Equal(t, int32(8080), modifiedDriverPod.Spec.Containers[0].Ports[0].ContainerPort) @@ -2005,14 +1982,14 @@ func TestPatchSparkPod_Ports(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -2023,7 +2000,7 @@ func TestPatchSparkPod_Ports(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedExecutorPod.Spec.Containers[0].Ports)) + assert.Len(t, modifiedExecutorPod.Spec.Containers[0].Ports, 2) assert.Equal(t, "executorPort1", modifiedExecutorPod.Spec.Containers[0].Ports[0].Name) assert.Equal(t, "executorPort2", modifiedExecutorPod.Spec.Containers[0].Ports[1].Name) assert.Equal(t, int32(8082), modifiedExecutorPod.Spec.Containers[0].Ports[0].ContainerPort) @@ -2046,8 +2023,8 @@ func TestPatchSparkPod_ShareProcessNamespace(t *testing.T) { }, } - var shareProcessNamespaceTrue = true - var shareProcessNamespaceFalse = false + shareProcessNamespaceTrue := true + shareProcessNamespaceFalse := false tests := []*bool{ nil, &shareProcessNamespaceTrue, @@ -2055,21 +2032,20 @@ func TestPatchSparkPod_ShareProcessNamespace(t *testing.T) { } for _, test := range tests { - app.Spec.Driver.ShareProcessNamespace = test app.Spec.Executor.ShareProcessNamespace = test driverPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - 
config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -2080,14 +2056,14 @@ func TestPatchSparkPod_ShareProcessNamespace(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -2108,8 +2084,8 @@ func TestPatchSparkPod_ShareProcessNamespace(t *testing.T) { assert.Nil(t, modifiedDriverPod.Spec.ShareProcessNamespace) assert.Nil(t, modifiedExecutorPod.Spec.ShareProcessNamespace) } else { - assert.Equal(t, true, *modifiedDriverPod.Spec.ShareProcessNamespace) - assert.Equal(t, true, *modifiedExecutorPod.Spec.ShareProcessNamespace) + assert.True(t, *modifiedDriverPod.Spec.ShareProcessNamespace) + assert.True(t, *modifiedExecutorPod.Spec.ShareProcessNamespace) } } } diff --git a/internal/webhook/suite_test.go b/internal/webhook/suite_test.go new file mode 100644 index 000000000..4ce5dc1fe --- /dev/null +++ b/internal/webhook/suite_test.go @@ -0,0 +1,150 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook_test + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "runtime" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + admissionv1 "k8s.io/api/admission/v1" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + "github.com/kubeflow/spark-operator/api/v1beta1" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
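For orientation, below is a minimal sketch of the kind of spec this suite is set up to host: a separate file in the same webhook_test package that reuses the suite globals (ctx, k8sClient) declared below and creates a SparkApplication through the envtest API server. The resource name and the Type/Mode values are illustrative assumptions, not taken from this patch.

package webhook_test

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/kubeflow/spark-operator/api/v1beta2"
)

// Minimal spec: create a SparkApplication against the envtest API server
// and expect the request to be admitted.
var _ = Describe("SparkApplication admission", func() {
	It("admits a minimal SparkApplication", func() {
		app := &v1beta2.SparkApplication{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "spark-pi", // illustrative name
				Namespace: "default",
			},
			Spec: v1beta2.SparkApplicationSpec{
				Type: "Scala",   // assumed valid SparkApplicationType value
				Mode: "cluster", // assumed valid DeployMode value
			},
		}
		Expect(k8sClient.Create(ctx, app)).To(Succeed())
	})
})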
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestWebhooks(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without calling the makefile target test. If not set, it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries set up under the bin directory to perform + // the tests directly. When we run make test, it will be set up and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s", + fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)), + + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "config", "webhook")}, + }, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = v1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + err = admissionv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // Start the webhook server using the manager. + webhookInstallOptions := &testEnv.WebhookInstallOptions + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + WebhookServer: webhook.NewServer(webhook.Options{ + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + }), + LeaderElection: false, + Metrics: metricsserver.Options{BindAddress: "0"}, + }) + Expect(err).NotTo(HaveOccurred()) + + // err = (&v1beta2.SparkApplication{}).SetupWebhookWithManager(mgr) + // Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:webhook + + go func() { + defer GinkgoRecover() + err = mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + // Wait for the webhook server to become ready. + dialer := &net.Dialer{Timeout: time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + return conn.Close() + }).Should(Succeed()) + +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/webhook/webhook.go b/internal/webhook/webhook.go new file mode 100644 index 000000000..8a2088f7a --- /dev/null +++ b/internal/webhook/webhook.go @@ -0,0 +1,37 @@ +/* +Copyright 2024 The Kubeflow authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + logger = ctrl.Log.WithName("") +) + +type Options struct { + SparkJobNamespaces []string + WebhookName string + WebhookPort int + WebhookSecretName string + WebhookSecretNamespace string + WebhookServiceName string + WebhookServiceNamespace string + WebhookMetricsBindAddress string + EnableResourceQuotaEnforcement bool +} diff --git a/main.go b/main.go deleted file mode 100644 index 58f6505b7..000000000 --- a/main.go +++ /dev/null @@ -1,312 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "context" - "flag" - "fmt" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/golang/glog" - apiv1 "k8s.io/api/core/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/leaderelection" - "k8s.io/client-go/tools/leaderelection/resourcelock" - "k8s.io/client-go/tools/record" - "k8s.io/utils/clock" - - "github.com/kubeflow/spark-operator/pkg/batchscheduler" - crclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" - crinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - operatorConfig "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/controller/scheduledsparkapplication" - "github.com/kubeflow/spark-operator/pkg/controller/sparkapplication" - "github.com/kubeflow/spark-operator/pkg/util" - "github.com/kubeflow/spark-operator/pkg/webhook" -) - -var ( - master = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") - kubeConfig = flag.String("kubeConfig", "", "Path to a kube config. Only required if out-of-cluster.") - controllerThreads = flag.Int("controller-threads", 10, "Number of worker threads used by the SparkApplication controller.") - resyncInterval = flag.Int("resync-interval", 30, "Informer resync interval in seconds.") - namespace = flag.String("namespace", apiv1.NamespaceAll, "The Kubernetes namespace to manage. 
Will manage custom resource objects of the managed CRD types for the whole cluster if unset.") - labelSelectorFilter = flag.String("label-selector-filter", "", "A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.") - enableWebhook = flag.Bool("enable-webhook", false, "Whether to enable the mutating admission webhook for admitting and patching Spark pods.") - webhookTimeout = flag.Int("webhook-timeout", 30, "Webhook Timeout in seconds before the webhook returns a timeout") - enableResourceQuotaEnforcement = flag.Bool("enable-resource-quota-enforcement", false, "Whether to enable ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled.") - ingressURLFormat = flag.String("ingress-url-format", "", "Ingress URL format.") - enableUIService = flag.Bool("enable-ui-service", true, "Enable Spark service UI.") - enableLeaderElection = flag.Bool("leader-election", false, "Enable Spark operator leader election.") - leaderElectionLockNamespace = flag.String("leader-election-lock-namespace", "spark-operator", "Namespace in which to create the ConfigMap for leader election.") - leaderElectionLockName = flag.String("leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.") - leaderElectionLeaseDuration = flag.Duration("leader-election-lease-duration", 15*time.Second, "Leader election lease duration.") - leaderElectionRenewDeadline = flag.Duration("leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.") - leaderElectionRetryPeriod = flag.Duration("leader-election-retry-period", 4*time.Second, "Leader election retry period.") - enableBatchScheduler = flag.Bool("enable-batch-scheduler", false, fmt.Sprintf("Enable batch schedulers for pods' scheduling, the available batch schedulers are: (%s).", strings.Join(batchscheduler.GetRegisteredNames(), ","))) - enableMetrics = flag.Bool("enable-metrics", false, "Whether to enable the metrics endpoint.") - metricsPort = flag.String("metrics-port", "10254", "Port for the metrics endpoint.") - metricsEndpoint = flag.String("metrics-endpoint", "/metrics", "Metrics endpoint.") - metricsPrefix = flag.String("metrics-prefix", "", "Prefix for the metrics.") - ingressClassName = flag.String("ingress-class-name", "", "Set ingressClassName for ingress resources created.") - metricsLabels util.ArrayFlags - metricsJobStartLatencyBuckets util.HistogramBuckets = util.DefaultJobStartLatencyBuckets -) - -func main() { - flag.Var(&metricsLabels, "metrics-labels", "Labels for the metrics") - flag.Var(&metricsJobStartLatencyBuckets, "metrics-job-start-latency-buckets", - "Comma-separated boundary values (in seconds) for the job start latency histogram bucket; "+ - "it accepts any numerical values that can be parsed into a 64-bit floating point") - flag.Parse() - - // Create the client config. Use kubeConfig if given, otherwise assume in-cluster. 
- config, err := buildConfig(*master, *kubeConfig) - if err != nil { - glog.Fatal(err) - } - kubeClient, err := clientset.NewForConfig(config) - if err != nil { - glog.Fatal(err) - } - - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM) - - stopCh := make(chan struct{}, 1) - startCh := make(chan struct{}, 1) - - if *enableLeaderElection { - podName := os.Getenv("POD_NAME") - hostname, err := os.Hostname() - if err != nil { - glog.Fatal(err) - } - broadcaster := record.NewBroadcaster() - source := apiv1.EventSource{Component: "spark-operator-leader-elector", Host: hostname} - recorder := broadcaster.NewRecorder(scheme.Scheme, source) - resourceLock := &resourcelock.LeaseLock{ - LeaseMeta: metav1.ObjectMeta{ - Namespace: *leaderElectionLockNamespace, - Name: *leaderElectionLockName, - }, - Client: kubeClient.CoordinationV1(), - LockConfig: resourcelock.ResourceLockConfig{ - Identity: podName, - EventRecorder: recorder, - }, - } - if err != nil { - glog.Fatal(err) - } - - electionCfg := leaderelection.LeaderElectionConfig{ - Lock: resourceLock, - LeaseDuration: *leaderElectionLeaseDuration, - RenewDeadline: *leaderElectionRenewDeadline, - RetryPeriod: *leaderElectionRetryPeriod, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(c context.Context) { - close(startCh) - }, - OnStoppedLeading: func() { - close(stopCh) - }, - }, - } - - elector, err := leaderelection.NewLeaderElector(electionCfg) - if err != nil { - glog.Fatal(err) - } - - go elector.Run(context.Background()) - } - - glog.Info("Starting the Spark Operator") - - crClient, err := crclientset.NewForConfig(config) - if err != nil { - glog.Fatal(err) - } - apiExtensionsClient, err := apiextensionsclient.NewForConfig(config) - if err != nil { - glog.Fatal(err) - } - - if err = util.InitializeIngressCapabilities(kubeClient); err != nil { - glog.Fatalf("Error retrieving Kubernetes cluster capabilities: %s", err.Error()) - } - - var batchSchedulerMgr *batchscheduler.SchedulerManager - if *enableBatchScheduler { - if !*enableWebhook { - glog.Fatal( - "failed to initialize the batch scheduler manager as it requires the webhook to be enabled") - } - batchSchedulerMgr = batchscheduler.NewSchedulerManager(config) - } - - crInformerFactory := buildCustomResourceInformerFactory(crClient) - podInformerFactory := buildPodInformerFactory(kubeClient) - - var metricConfig *util.MetricConfig - if *enableMetrics { - metricConfig = &util.MetricConfig{ - MetricsEndpoint: *metricsEndpoint, - MetricsPort: *metricsPort, - MetricsPrefix: *metricsPrefix, - MetricsLabels: metricsLabels, - MetricsJobStartLatencyBuckets: metricsJobStartLatencyBuckets, - } - - glog.Info("Enabling metrics collecting and exporting to Prometheus") - util.InitializeMetrics(metricConfig) - } - - applicationController := sparkapplication.NewController( - crClient, kubeClient, crInformerFactory, podInformerFactory, metricConfig, *namespace, *ingressURLFormat, *ingressClassName, batchSchedulerMgr, *enableUIService) - scheduledApplicationController := scheduledsparkapplication.NewController( - crClient, kubeClient, apiExtensionsClient, crInformerFactory, clock.RealClock{}) - - // Start the informer factory that in turn starts the informer. 
- go crInformerFactory.Start(stopCh) - go podInformerFactory.Start(stopCh) - - var hook *webhook.WebHook - if *enableWebhook { - var coreV1InformerFactory informers.SharedInformerFactory - if *enableResourceQuotaEnforcement { - coreV1InformerFactory = buildCoreV1InformerFactory(kubeClient) - } - var err error - // Don't deregister webhook on exit if leader election enabled (i.e. multiple webhooks running) - hook, err = webhook.New(kubeClient, crInformerFactory, *namespace, !*enableLeaderElection, *enableResourceQuotaEnforcement, coreV1InformerFactory, webhookTimeout) - if err != nil { - glog.Fatal(err) - } - - if *enableResourceQuotaEnforcement { - go coreV1InformerFactory.Start(stopCh) - } - - if err = hook.Start(stopCh); err != nil { - glog.Fatal(err) - } - } else if *enableResourceQuotaEnforcement { - glog.Fatal("Webhook must be enabled to use resource quota enforcement.") - } - - if *enableLeaderElection { - glog.Info("Waiting to be elected leader before starting application controller goroutines") - select { - case <-signalCh: - os.Exit(0) - case <-startCh: - } - } - - glog.Info("Starting application controller goroutines") - - if err = applicationController.Start(*controllerThreads, stopCh); err != nil { - glog.Fatal(err) - } - if err = scheduledApplicationController.Start(*controllerThreads, stopCh); err != nil { - glog.Fatal(err) - } - - select { - case <-signalCh: - close(stopCh) - case <-stopCh: - } - - glog.Info("Shutting down the Spark Operator") - applicationController.Stop() - scheduledApplicationController.Stop() - if *enableWebhook { - if err := hook.Stop(); err != nil { - glog.Fatal(err) - } - } -} - -func buildConfig(masterURL string, kubeConfig string) (*rest.Config, error) { - if kubeConfig != "" { - return clientcmd.BuildConfigFromFlags(masterURL, kubeConfig) - } - return rest.InClusterConfig() -} - -func buildCustomResourceInformerFactory(crClient crclientset.Interface) crinformers.SharedInformerFactory { - var factoryOpts []crinformers.SharedInformerOption - if *namespace != apiv1.NamespaceAll { - factoryOpts = append(factoryOpts, crinformers.WithNamespace(*namespace)) - } - if len(*labelSelectorFilter) > 0 { - tweakListOptionsFunc := func(options *metav1.ListOptions) { - options.LabelSelector = *labelSelectorFilter - } - factoryOpts = append(factoryOpts, crinformers.WithTweakListOptions(tweakListOptionsFunc)) - } - return crinformers.NewSharedInformerFactoryWithOptions( - crClient, - // resyncPeriod. Every resyncPeriod, all resources in the cache will re-trigger events. - time.Duration(*resyncInterval)*time.Second, - factoryOpts...) -} - -func buildPodInformerFactory(kubeClient clientset.Interface) informers.SharedInformerFactory { - var podFactoryOpts []informers.SharedInformerOption - if *namespace != apiv1.NamespaceAll { - podFactoryOpts = append(podFactoryOpts, informers.WithNamespace(*namespace)) - } - tweakListOptionsFunc := func(options *metav1.ListOptions) { - options.LabelSelector = fmt.Sprintf("%s,%s", operatorConfig.SparkRoleLabel, operatorConfig.LaunchedBySparkOperatorLabel) - if len(*labelSelectorFilter) > 0 { - options.LabelSelector = options.LabelSelector + "," + *labelSelectorFilter - } - } - podFactoryOpts = append(podFactoryOpts, informers.WithTweakListOptions(tweakListOptionsFunc)) - return informers.NewSharedInformerFactoryWithOptions(kubeClient, time.Duration(*resyncInterval)*time.Second, podFactoryOpts...) 
-} - -func buildCoreV1InformerFactory(kubeClient clientset.Interface) informers.SharedInformerFactory { - var coreV1FactoryOpts []informers.SharedInformerOption - if *namespace != apiv1.NamespaceAll { - coreV1FactoryOpts = append(coreV1FactoryOpts, informers.WithNamespace(*namespace)) - } - if len(*labelSelectorFilter) > 0 { - tweakListOptionsFunc := func(options *metav1.ListOptions) { - options.LabelSelector = *labelSelectorFilter - } - coreV1FactoryOpts = append(coreV1FactoryOpts, informers.WithTweakListOptions(tweakListOptionsFunc)) - } - return informers.NewSharedInformerFactoryWithOptions(kubeClient, time.Duration(*resyncInterval)*time.Second, coreV1FactoryOpts...) -} diff --git a/manifest/crds/kustomization.yaml b/manifest/crds/kustomization.yaml deleted file mode 100644 index ed4efc5a8..000000000 --- a/manifest/crds/kustomization.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - sparkoperator.k8s.io_sparkapplications.yaml - - sparkoperator.k8s.io_scheduledsparkapplications.yaml diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml deleted file mode 100644 index 60e836b08..000000000 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ /dev/null @@ -1,4496 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: (unknown) - api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 - name: scheduledsparkapplications.sparkoperator.k8s.io -spec: - group: sparkoperator.k8s.io - names: - kind: ScheduledSparkApplication - listKind: ScheduledSparkApplicationList - plural: scheduledsparkapplications - shortNames: - - scheduledsparkapp - singular: scheduledsparkapplication - scope: Namespaced - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .spec.suspend - name: Suspend - type: boolean - - jsonPath: .status.lastRun - name: Last Run - type: date - - jsonPath: .status.lastRunName - name: Last Run Name - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - concurrencyPolicy: - type: string - failedRunHistoryLimit: - format: int32 - type: integer - schedule: - type: string - successfulRunHistoryLimit: - format: int32 - type: integer - suspend: - type: boolean - template: - properties: - arguments: - items: - type: string - type: array - batchScheduler: - type: string - batchSchedulerOptions: - properties: - priorityClassName: - type: string - queue: - type: string - resources: - additionalProperties: - 
anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - deps: - properties: - excludePackages: - items: - type: string - type: array - files: - items: - type: string - type: array - jars: - items: - type: string - type: array - packages: - items: - type: string - type: array - pyFiles: - items: - type: string - type: array - repositories: - items: - type: string - type: array - type: object - driver: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object 
- type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - searches: - items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: - type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - 
apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: 
object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - 
properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - javaOptions: - type: string - kubernetesMaster: - type: string - labels: - additionalProperties: - type: string - type: object - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podName: - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' - type: string - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - 
runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - serviceAccount: - type: string - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - shareProcessNamespace: - type: boolean - sidecars: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - 
format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: 
string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - type: object - dynamicAllocation: - properties: - enabled: - type: boolean - initialExecutors: - format: int32 - type: integer - maxExecutors: - format: int32 - type: integer - minExecutors: - format: int32 - type: integer - shuffleTrackingTimeout: - format: int64 - type: integer - type: object - executor: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator 
-                            # tail of nodeAffinity (requiredDuringSchedulingIgnoredDuringExecution:
-                            # nodeSelectorTerms items with matchExpressions/matchFields, each
-                            # entry requiring key and operator; required: nodeSelectorTerms)
-                            type: object
-                          podAffinity:
-                            # preferred/requiredDuringSchedulingIgnoredDuringExecution pod
-                            # affinity terms: labelSelector (matchExpressions, matchLabels),
-                            # namespaces, topologyKey (required); weight on the preferred variant
-                            type: object
-                          podAntiAffinity:
-                            # same shape as podAffinity
-                            type: object
-                        type: object
-                      annotations: {additionalProperties: {type: string}, type: object}
-                      configMaps:
-                        items:
-                          properties:
-                            name: {type: string}
-                            path: {type: string}
-                          required: [name, path]
-                          type: object
-                        type: array
-                      coreLimit: {type: string}
-                      coreRequest: {type: string}
-                      cores: {format: int32, minimum: 1, type: integer}
-                      deleteOnTermination: {type: boolean}
-                      dnsConfig:
-                        # nameservers, options (name/value pairs), searches
-                        type: object
-                      env:
-                        # core/v1 EnvVar list: name, value, valueFrom (configMapKeyRef,
-                        # fieldRef, resourceFieldRef, secretKeyRef); required: name
-                        items: {type: object}
-                        type: array
-                      envFrom:
-                        # core/v1 EnvFromSource list: configMapRef, prefix, secretRef
-                        items: {type: object}
-                        type: array
-                      envSecretKeyRefs:
-                        additionalProperties:
-                          properties:
-                            key: {type: string}
-                            name: {type: string}
-                          required: [key, name]
-                          type: object
-                        type: object
-                      envVars: {additionalProperties: {type: string}, type: object}
-                      gpu:
-                        properties:
-                          name: {type: string}
-                          quantity: {format: int64, type: integer}
-                        required: [name, quantity]
-                        type: object
-                      hostAliases:
-                        # hostnames, ip
-                        items: {type: object}
-                        type: array
-                      hostNetwork: {type: boolean}
-                      image: {type: string}
-                      initContainers:
-                        # items carry the full core/v1 Container schema: args, command, env,
-                        # envFrom, image, imagePullPolicy, lifecycle, livenessProbe, name,
-                        # ports, readinessProbe, resources, restartPolicy, securityContext,
-                        # startupProbe, stdin, stdinOnce, terminationMessagePath,
-                        # terminationMessagePolicy, tty, volumeDevices, volumeMounts,
-                        # workingDir (required: name)
-                        items: {type: object}
-                        type: array
-                      instances: {format: int32, minimum: 1, type: integer}
-                      javaOptions: {type: string}
-                      labels: {additionalProperties: {type: string}, type: object}
-                      memory: {type: string}
-                      memoryOverhead: {type: string}
-                      nodeSelector: {additionalProperties: {type: string}, type: object}
-                      podSecurityContext:
-                        # core/v1 PodSecurityContext: fsGroup, runAsGroup, runAsNonRoot,
-                        # runAsUser, seLinuxOptions, supplementalGroups, sysctls, windowsOptions
-                        type: object
-                      schedulerName: {type: string}
-                      secrets:
-                        items:
-                          properties:
-                            name: {type: string}
-                            path: {type: string}
-                            secretType: {type: string}
-                          required: [name, path, secretType]
-                          type: object
-                        type: array
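The executor block above condenses a long run of generated schema, and its sizing fields are easier to grasp in use. A minimal sketch of executor sizing on a ScheduledSparkApplication, assuming the v1beta2 API; the name, schedule, resource values, and node label are all illustrative, not defaults from this schema:

```yaml
# Hedged sketch: every concrete value below is an assumption.
apiVersion: sparkoperator.k8s.io/v1beta2
kind: ScheduledSparkApplication
metadata:
  name: spark-pi-scheduled        # hypothetical name
spec:
  schedule: "@every 10m"          # hypothetical schedule
  template:
    executor:
      instances: 2                # int32, minimum 1 per the schema above
      cores: 1
      coreLimit: "1200m"
      memory: "512m"
      memoryOverhead: "128m"
      nodeSelector:
        disktype: ssd             # hypothetical node label
      labels:
        version: 3.1.1
```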
-                      securityContext:
-                        # core/v1 SecurityContext: seccompProfile, allowPrivilegeEscalation,
-                        # capabilities (add/drop), privileged, procMount,
-                        # readOnlyRootFilesystem, runAsGroup, runAsNonRoot, runAsUser,
-                        # seLinuxOptions, windowsOptions
-                        type: object
-                      serviceAccount: {type: string}
-                      shareProcessNamespace: {type: boolean}
-                      sidecars:
-                        # items carry the same core/v1 Container schema as initContainers above
-                        items: {type: object}
-                        type: array
-                      terminationGracePeriodSeconds: {format: int64, type: integer}
-                      tolerations:
-                        items:
-                          properties:
-                            effect: {type: string}
-                            key: {type: string}
-                            operator: {type: string}
-                            tolerationSeconds: {format: int64, type: integer}
-                            value: {type: string}
-                          type: object
-                        type: array
-                      volumeMounts:
-                        items:
-                          properties:
-                            mountPath: {type: string}
-                            mountPropagation: {type: string}
-                            name: {type: string}
-                            readOnly: {type: boolean}
-                            subPath: {type: string}
-                            subPathExpr: {type: string}
-                          required: [mountPath, name]
-                          type: object
-                        type: array
-                    type: object
-                  failureRetries: {format: int32, type: integer}
-                  hadoopConf: {additionalProperties: {type: string}, type: object}
-                  hadoopConfigMap: {type: string}
-                  image: {type: string}
-                  imagePullPolicy: {type: string}
-                  imagePullSecrets:
-                    items: {type: string}
-                    type: array
-                  mainApplicationFile: {type: string}
-                  mainClass: {type: string}
-                  memoryOverheadFactor: {type: string}
-                  mode:
-                    enum:
-                    - cluster
-                    - client
-                    type: string
-                  monitoring:
-                    properties:
-                      exposeDriverMetrics: {type: boolean}
-                      exposeExecutorMetrics: {type: boolean}
-                      metricsProperties: {type: string}
-                      metricsPropertiesFile: {type: string}
-                      prometheus:
-                        properties:
-                          configFile: {type: string}
-                          configuration: {type: string}
-                          jmxExporterJar: {type: string}
-                          port: {format: int32, maximum: 49151, minimum: 1024, type: integer}
-                          portName: {type: string}
-                        required: [jmxExporterJar]
-                        type: object
-                    required: [exposeDriverMetrics, exposeExecutorMetrics]
-                    type: object
-                  nodeSelector: {additionalProperties: {type: string}, type: object}
-                  proxyUser: {type: string}
-                  pythonVersion:
-                    enum: ["2", "3"]
-                    type: string
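The monitoring block above is the schema's hook for the Prometheus JMX exporter: jmxExporterJar is its only required prometheus field, both expose* flags are required, and port is constrained to the unprivileged 1024-49151 range. A hedged sketch, assuming the exporter jar is baked into the image at a path like the one used by the project's example manifests:

```yaml
monitoring:
  exposeDriverMetrics: true       # both expose* fields are required by the schema
  exposeExecutorMetrics: true
  prometheus:
    jmxExporterJar: /prometheus/jmx_prometheus_javaagent-0.11.0.jar  # assumed path
    port: 8090                    # must fall within 1024-49151
```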
-                  restartPolicy:
-                    properties:
-                      onFailureRetries: {format: int32, minimum: 0, type: integer}
-                      onFailureRetryInterval: {format: int64, minimum: 1, type: integer}
-                      onSubmissionFailureRetries: {format: int32, minimum: 0, type: integer}
-                      onSubmissionFailureRetryInterval: {format: int64, minimum: 1, type: integer}
-                      type:
-                        enum:
-                        - Never
-                        - Always
-                        - OnFailure
-                        type: string
-                    type: object
-                  retryInterval: {format: int64, type: integer}
-                  sparkConf: {additionalProperties: {type: string}, type: object}
-                  sparkConfigMap: {type: string}
-                  sparkUIOptions:
-                    properties:
-                      serviceAnnotations: {additionalProperties: {type: string}, type: object}
-                      serviceLabels: {additionalProperties: {type: string}, type: object}
-                      ingressAnnotations: {additionalProperties: {type: string}, type: object}
-                      ingressTLS:
-                        items:
-                          properties:
-                            hosts: {items: {type: string}, type: array}
-                            secretName: {type: string}
-                          type: object
-                        type: array
-                      servicePort: {format: int32, type: integer}
-                      serviceType: {type: string}
-                    type: object
-                  driverIngressOptions:
-                    items:
-                      properties:
-                        serviceAnnotations: {additionalProperties: {type: string}, type: object}
-                        serviceLabels: {additionalProperties: {type: string}, type: object}
-                        ingressURLFormat: {type: string}
-                        ingressAnnotations: {additionalProperties: {type: string}, type: object}
-                        ingressTLS:
-                          items:
-                            properties:
-                              hosts: {items: {type: string}, type: array}
-                              secretName: {type: string}
-                            type: object
-                          type: array
-                        servicePort: {format: int32, type: integer}
-                        servicePortName: {type: string}
-                        serviceType: {type: string}
-                      type: object
-                    type: array
-                  sparkVersion: {type: string}
-                  timeToLiveSeconds: {format: int64, type: integer}
-                  type:
-                    enum:
-                    - Java
-                    - Python
-                    - Scala
-                    - R
-                    type: string
-                  volumes:
-                    items:
-                      # one core/v1 Volume per item: name (required) plus one source among
-                      # awsElasticBlockStore, azureDisk, azureFile, cephfs, cinder, configMap,
-                      # csi, downwardAPI, emptyDir, ephemeral, fc, flexVolume, flocker,
-                      # gcePersistentDisk, gitRepo, glusterfs, hostPath, iscsi, nfs,
-                      # persistentVolumeClaim, photonPersistentDisk, portworxVolume, projected,
-                      # quobyte, rbd, scaleIO, secret, storageos, vsphereVolume
-                      type: object
-                    type: array
-                required:
-                - driver
-                - executor
-                - sparkVersion
-                - type
-                type: object
-            required:
-            - schedule
-            - template
-            type: object
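The deleted spec wires pod storage the Kubernetes way: volumes declares the sources at the application level, and each driver/executor block mounts them by name through volumeMounts. A sketch under assumed names, written for a SparkApplication but equally valid inside a ScheduledSparkApplication template:

```yaml
spec:
  volumes:
  - name: spark-work              # hypothetical volume name
    hostPath:
      path: /tmp/spark-work       # hostPath requires path, per the schema
  executor:
    volumeMounts:
    - name: spark-work
      mountPath: /tmp             # mountPath and name are required
```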
-          status:
-            properties:
-              lastRun: {format: date-time, nullable: true, type: string}
-              lastRunName: {type: string}
-              nextRun: {format: date-time, nullable: true, type: string}
-              pastFailedRunNames: {items: {type: string}, type: array}
-              pastSuccessfulRunNames: {items: {type: string}, type: array}
-              reason: {type: string}
-              scheduleState: {type: string}
-            type: object
-        required:
-        - metadata
-        - spec
-        type: object
-
-status:
-  acceptedNames:
-    kind: ""
-    plural: ""
-  conditions: []
-  storedVersions: []
diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml
deleted file mode 100644
index c67bb2afa..000000000
--- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml
+++ /dev/null
@@ -1,4506 +0,0 @@
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: (unknown)
-    api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298
-  name: sparkapplications.sparkoperator.k8s.io
-spec:
-  group: sparkoperator.k8s.io
-  names:
-    kind: SparkApplication
-    listKind: SparkApplicationList
-    plural: sparkapplications
-    shortNames:
-    - sparkapp
-    singular: sparkapplication
-  scope: Namespaced
-  versions:
-  - name: v1beta2
-    served: true
-    storage: true
-    subresources:
-      status: {}
-    additionalPrinterColumns:
-    - jsonPath: .status.applicationState.state
-      name: Status
-      type: string
-    - jsonPath: .status.executionAttempts
-      name: Attempts
-      type: string
-    - jsonPath: .status.lastSubmissionAttemptTime
-      name: Start
-      type: string
-    - jsonPath: .status.terminationTime
-      name: Finish
-      type: string
-    - jsonPath: .metadata.creationTimestamp
-      name: Age
-      type: date
-    schema:
-      openAPIV3Schema:
-        properties:
-          apiVersion: {type: string}
-          kind: {type: string}
-          metadata: {type: object}
-          spec:
-            properties:
-              arguments: {items: {type: string}, type: array}
-              batchScheduler: {type: string}
-              batchSchedulerOptions:
-                properties:
-                  priorityClassName: {type: string}
-                  queue: {type: string}
-                  resources:
-                    additionalProperties:
-                      anyOf: [{type: integer}, {type: string}]
-                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                      x-kubernetes-int-or-string: true
-                    type: object
-                type: object
-              deps:
-                properties:
-                  excludePackages: {items: {type: string}, type: array}
-                  files: {items: {type: string}, type: array}
-                  jars: {items: {type: string}, type: array}
-                  packages: {items: {type: string}, type: array}
-                  pyFiles: {items: {type: string}, type: array}
-                  repositories: {items: {type: string}, type: array}
-                type: object
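deps is how an application pulls in artifacts that are not baked into the image; each list takes plain strings that spark-submit understands. An illustrative block, with placeholder URLs and Maven coordinates:

```yaml
deps:
  jars:
  - https://repo.example.com/libs/my-udfs.jar           # placeholder URL
  packages:
  - org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.1    # placeholder coordinate
  excludePackages:
  - com.fasterxml.jackson.core:jackson-databind         # placeholder exclusion
```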
-              driver:
-                properties:
-                  affinity:
-                    properties:
-                      nodeAffinity:
-                        # preferred/requiredDuringSchedulingIgnoredDuringExecution node terms
-                        # (matchExpressions/matchFields with key, operator, values)
-                        type: object
-                      podAffinity:
-                        # pod affinity terms: labelSelector (matchExpressions, matchLabels),
-                        # namespaces, topologyKey (required); weight on the preferred variant
-                        type: object
-                      podAntiAffinity:
-                        # same shape as podAffinity
-                        type: object
-                    type: object
-                  annotations: {additionalProperties: {type: string}, type: object}
-                  configMaps:
-                    items:
-                      properties:
-                        name: {type: string}
-                        path: {type: string}
-                      required: [name, path]
-                      type: object
-                    type: array
-                  coreLimit: {type: string}
-                  coreRequest: {type: string}
-                  cores: {format: int32, minimum: 1, type: integer}
-                  dnsConfig:
-                    # nameservers, options (name/value pairs), searches
-                    type: object
-                  env:
-                    # core/v1 EnvVar list: name, value, valueFrom (configMapKeyRef, fieldRef,
-                    # resourceFieldRef, secretKeyRef); required: name
-                    items: {type: object}
-                    type: array
-                  envFrom:
-                    # core/v1 EnvFromSource list: configMapRef, prefix, secretRef
-                    items: {type: object}
-                    type: array
-                  envSecretKeyRefs:
-                    additionalProperties:
-                      properties:
-                        key: {type: string}
-                        name: {type: string}
-                      required: [key, name]
-                      type: object
-                    type: object
-                  envVars: {additionalProperties: {type: string}, type: object}
-                  gpu:
-                    properties:
-                      name: {type: string}
-                      quantity: {format: int64, type: integer}
-                    required: [name, quantity]
-                    type: object
-                  hostAliases:
-                    # hostnames, ip
-                    items: {type: object}
-                    type: array
-                  hostNetwork: {type: boolean}
-                  image: {type: string}
-                  initContainers:
-                    # items carry the full core/v1 Container schema, as in the CRD above
-                    items: {type: object}
-                    type: array
-                  javaOptions: {type: string}
-                  kubernetesMaster: {type: string}
-                  labels: {additionalProperties: {type: string}, type: object}
-                  lifecycle:
-                    properties:
-                      postStart:
-                        # exec (command), httpGet (host, httpHeaders, path, port, scheme),
-                        # tcpSocket (host, port)
-                        type: object
-                      preStop:
-                        # same shape as postStart
-                        type: object
-                    type: object
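Driver-level lifecycle hooks follow the core/v1 Lifecycle shape sketched above. One plausible use, with an assumed drain command and an illustrative JVM flag, is delaying driver shutdown so logs and metrics can be flushed:

```yaml
driver:
  javaOptions: "-XX:+UseG1GC"               # illustrative JVM flag
  lifecycle:
    preStop:
      exec:
        command: ["/bin/sh", "-c", "sleep 5"]   # assumed drain delay
```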
-                  memory: {type: string}
-                  memoryOverhead: {type: string}
-                  nodeSelector: {additionalProperties: {type: string}, type: object}
-                  podName:
-                    pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*'
-                    type: string
-                  podSecurityContext:
-                    # core/v1 PodSecurityContext: fsGroup, runAsGroup, runAsNonRoot, runAsUser,
-                    # seLinuxOptions, supplementalGroups, sysctls, windowsOptions
-                    type: object
-                  schedulerName: {type: string}
-                  secrets:
-                    items:
-                      properties:
-                        name: {type: string}
-                        path: {type: string}
-                        secretType: {type: string}
-                      required: [name, path, secretType]
-                      type: object
-                    type: array
-                  securityContext:
-                    # core/v1 SecurityContext: seccompProfile, allowPrivilegeEscalation,
-                    # capabilities (add/drop), privileged, procMount, readOnlyRootFilesystem,
-                    # runAsGroup, runAsNonRoot, runAsUser, seLinuxOptions, windowsOptions
-                    type: object
-                  serviceAccount: {type: string}
-                  serviceAnnotations: {additionalProperties: {type: string}, type: object}
-                  serviceLabels: {additionalProperties: {type: string}, type: object}
-                  shareProcessNamespace: {type: boolean}
-                  sidecars:
-                    # items carry the full core/v1 Container schema, as with initContainers
-                    items: {type: object}
-                    type: array
-                  terminationGracePeriodSeconds: {format: int64, type: integer}
-                  tolerations:
-                    items:
-                      properties:
-                        effect: {type: string}
-                        key: {type: string}
-                        operator: {type: string}
-                        tolerationSeconds: {format: int64, type: integer}
-                        value: {type: string}
-                      type: object
-                    type: array
-                  volumeMounts:
-                    items:
-                      properties:
-                        mountPath: {type: string}
-                        mountPropagation: {type: string}
-                        name: {type: string}
-                        readOnly: {type: boolean}
-                        subPath: {type: string}
-                        subPathExpr: {type: string}
-                      required: [mountPath, name]
-                      type: object
-                    type: array
-                type: object
-              dynamicAllocation:
-                properties:
-                  enabled: {type: boolean}
-                  initialExecutors: {format: int32, type: integer}
-                  maxExecutors: {format: int32, type: integer}
-                  minExecutors: {format: int32, type: integer}
-                  shuffleTrackingTimeout: {format: int64, type: integer}
-                type: object
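dynamicAllocation toggles Spark's shuffle-tracking-based executor scaling, which is the variant usable on Kubernetes without an external shuffle service. A sketch with assumed values; the timeout units are an assumption tied to Spark's shuffle-tracking timeout configuration:

```yaml
dynamicAllocation:
  enabled: true
  initialExecutors: 2             # all counts here are illustrative
  minExecutors: 2
  maxExecutors: 10
  shuffleTrackingTimeout: 600000  # assumed milliseconds
```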
string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - deleteOnTermination: - type: boolean - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - searches: - items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: - type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - 
valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: 
int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: 
object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - instances: - format: int32 - minimum: 1 - type: integer - javaOptions: - type: string - labels: - additionalProperties: - type: string - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - serviceAccount: - type: string - shareProcessNamespace: - type: boolean - sidecars: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - 
x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: 
- items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - 
type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - type: object - failureRetries: - format: int32 - type: integer - hadoopConf: - additionalProperties: - type: string - type: object - hadoopConfigMap: - type: string - image: - type: string - imagePullPolicy: - type: string - imagePullSecrets: - items: - type: string - type: array - mainApplicationFile: - type: string - mainClass: - type: string - memoryOverheadFactor: - type: string - mode: - enum: - - cluster - - client - type: string - monitoring: - properties: - exposeDriverMetrics: - type: boolean - exposeExecutorMetrics: - type: boolean - metricsProperties: - type: string - metricsPropertiesFile: - type: string - prometheus: - properties: - configFile: - type: string - configuration: - type: string - jmxExporterJar: - type: string - port: - format: int32 - maximum: 49151 - minimum: 1024 - type: integer - portName: - type: string - required: - - jmxExporterJar - type: object - required: - - exposeDriverMetrics - - exposeExecutorMetrics - type: object - nodeSelector: - additionalProperties: - type: string - type: object - proxyUser: - type: string - pythonVersion: - enum: - - "2" - - "3" - type: string - restartPolicy: - properties: - onFailureRetries: - format: int32 - minimum: 0 - type: integer - onFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - onSubmissionFailureRetries: - format: int32 - minimum: 0 - type: integer - onSubmissionFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - type: - enum: - - Never - - Always - - OnFailure - type: string - type: object - retryInterval: - format: int64 - type: integer - sparkConf: - additionalProperties: - type: string - type: object - sparkConfigMap: - type: string - sparkUIOptions: - properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressAnnotations: - additionalProperties: - type: string - type: object - ingressTLS: - items: - properties: - hosts: - items: - type: string - type: array - secretName: - type: string - type: object - type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string - type: object - driverIngressOptions: - items: - properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressURLFormat: - type: string - ingressAnnotations: - additionalProperties: - type: string - type: object - ingressTLS: - items: - properties: - hosts: - items: - type: string - type: array - secretName: - type: string - type: object - type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string - type: object - type: array - sparkVersion: - type: string - timeToLiveSeconds: - format: int64 - type: integer - type: - enum: - - 
Java - - Python - - Scala - - R - type: string - volumes: - items: - properties: - awsElasticBlockStore: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - azureDisk: - properties: - cachingMode: - type: string - diskName: - type: string - diskURI: - type: string - fsType: - type: string - kind: - type: string - readOnly: - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - properties: - readOnly: - type: boolean - secretName: - type: string - shareName: - type: string - required: - - secretName - - shareName - type: object - cephfs: - properties: - monitors: - items: - type: string - type: array - path: - type: string - readOnly: - type: boolean - secretFile: - type: string - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - monitors - type: object - cinder: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeID: - type: string - required: - - volumeID - type: object - configMap: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - csi: - properties: - driver: - type: string - fsType: - type: string - nodePublishSecretRef: - properties: - name: - type: string - type: object - readOnly: - type: boolean - volumeAttributes: - additionalProperties: - type: string - type: object - required: - - driver - type: object - downwardAPI: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - spec: - properties: - accessModes: - items: - type: string - type: array - resources: - properties: - requests: - properties: - storage: - type: string - type: object - type: object - storageClassName: - type: string - type: object - type: object - type: object - fc: - properties: - fsType: - type: string - lun: - format: int32 - type: integer - readOnly: - type: boolean - targetWWNs: - items: - type: string - type: array - wwids: - items: - type: string - type: array - type: object - flexVolume: - properties: - driver: - type: string - fsType: - type: string - options: - additionalProperties: - type: string - type: object - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - 
type: object - required: - - driver - type: object - flocker: - properties: - datasetName: - type: string - datasetUUID: - type: string - type: object - gcePersistentDisk: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - pdName: - type: string - readOnly: - type: boolean - required: - - pdName - type: object - gitRepo: - properties: - directory: - type: string - repository: - type: string - revision: - type: string - required: - - repository - type: object - glusterfs: - properties: - endpoints: - type: string - path: - type: string - readOnly: - type: boolean - required: - - endpoints - - path - type: object - hostPath: - properties: - path: - type: string - type: - type: string - required: - - path - type: object - iscsi: - properties: - chapAuthDiscovery: - type: boolean - chapAuthSession: - type: boolean - fsType: - type: string - initiatorName: - type: string - iqn: - type: string - iscsiInterface: - type: string - lun: - format: int32 - type: integer - portals: - items: - type: string - type: array - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - targetPortal: - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - type: string - nfs: - properties: - path: - type: string - readOnly: - type: boolean - server: - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - properties: - claimName: - type: string - readOnly: - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - properties: - fsType: - type: string - pdID: - type: string - required: - - pdID - type: object - portworxVolume: - properties: - fsType: - type: string - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - projected: - properties: - defaultMode: - format: int32 - type: integer - sources: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - downwardAPI: - properties: - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - serviceAccountToken: - properties: - audience: - type: string - expirationSeconds: - format: int64 - type: integer - path: - type: string - required: - - path - type: object - type: object - type: array - required: - - sources - type: object - quobyte: - properties: - group: - type: string - readOnly: - type: boolean - registry: - type: string - tenant: - type: string - user: - type: string - volume: - type: string - required: - 
- registry - - volume - type: object - rbd: - properties: - fsType: - type: string - image: - type: string - keyring: - type: string - monitors: - items: - type: string - type: array - pool: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - image - - monitors - type: object - scaleIO: - properties: - fsType: - type: string - gateway: - type: string - protectionDomain: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - sslEnabled: - type: boolean - storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeName: - type: string - volumeNamespace: - type: string - type: object - vsphereVolume: - properties: - fsType: - type: string - storagePolicyID: - type: string - storagePolicyName: - type: string - volumePath: - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - required: - - driver - - executor - - sparkVersion - - type - type: object - status: - properties: - applicationState: - properties: - errorMessage: - type: string - state: - type: string - required: - - state - type: object - driverInfo: - properties: - podName: - type: string - webUIAddress: - type: string - webUIIngressAddress: - type: string - webUIIngressName: - type: string - webUIPort: - format: int32 - type: integer - webUIServiceName: - type: string - type: object - executionAttempts: - format: int32 - type: integer - executorState: - additionalProperties: - type: string - type: object - lastSubmissionAttemptTime: - format: date-time - nullable: true - type: string - sparkApplicationId: - type: string - submissionAttempts: - format: int32 - type: integer - submissionID: - type: string - terminationTime: - format: date-time - nullable: true - type: string - required: - - driverInfo - type: object - required: - - metadata - - spec - type: object -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/manifest/spark-application-rbac/kustomization.yaml b/manifest/spark-application-rbac/kustomization.yaml deleted file mode 100644 index 1e4e490c9..000000000 --- a/manifest/spark-application-rbac/kustomization.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: spark - -resources: - - spark-application-rbac.yaml diff --git a/manifest/spark-application-rbac/spark-application-rbac.yaml b/manifest/spark-application-rbac/spark-application-rbac.yaml deleted file mode 100644 index 662f227d1..000000000 --- a/manifest/spark-application-rbac/spark-application-rbac.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -apiVersion: v1 -kind: Namespace -metadata: - name: spark ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: spark - namespace: spark ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: spark - name: spark-role -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["*"] -- apiGroups: [""] - resources: ["services"] - verbs: ["*"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: spark-role-binding - namespace: spark -subjects: -- kind: ServiceAccount - name: spark - namespace: spark -roleRef: - kind: Role - name: spark-role - apiGroup: rbac.authorization.k8s.io diff --git a/manifest/spark-operator-install/kustomization.yaml b/manifest/spark-operator-install/kustomization.yaml deleted file mode 100644 index 1d102d262..000000000 --- a/manifest/spark-operator-install/kustomization.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: spark-operator - -resources: - - spark-operator-rbac.yaml - - ../crds - - spark-operator.yaml diff --git a/manifest/spark-operator-install/spark-operator-rbac.yaml b/manifest/spark-operator-install/spark-operator-rbac.yaml deleted file mode 100644 index 71a053b48..000000000 --- a/manifest/spark-operator-install/spark-operator-rbac.yaml +++ /dev/null @@ -1,97 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: v1 -kind: Namespace -metadata: - name: spark-operator ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: sparkoperator - namespace: spark-operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: sparkoperator -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["*"] -- apiGroups: [""] - resources: ["configmaps"] - verbs: ["*"] -- apiGroups: [""] - resources: ["services", "secrets"] - verbs: ["create", "get", "delete"] -- apiGroups: ["extensions"] - resources: ["ingresses"] - verbs: ["create", "get", "delete"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get"] -- apiGroups: [""] - resources: ["resourcequotas"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", "get", "update", "delete"] -- apiGroups: ["admissionregistration.k8s.io"] - resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] - verbs: ["create", "get", "update", "delete"] -- apiGroups: ["sparkoperator.k8s.io"] - resources: ["sparkapplications", "scheduledsparkapplications", "sparkapplications/status", "scheduledsparkapplications/status", "sparkapplications/finalizers", "scheduledsparkapplications/finalizers"] - verbs: ["*"] -- apiGroups: ["scheduling.volcano.sh"] - resources: ["podgroups", "queues", "queues/status"] - verbs: ["get", "list", "watch", "create", "delete", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: sparkoperator -subjects: - - kind: ServiceAccount - name: sparkoperator - namespace: spark-operator -roleRef: - kind: ClusterRole - name: sparkoperator - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" - name: sparkoperator-aggregate-to-admin -rules: -- apiGroups: ["sparkoperator.k8s.io"] - resources: ["sparkapplications", "scheduledsparkapplications"] - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch diff --git a/manifest/spark-operator-install/spark-operator.yaml b/manifest/spark-operator-install/spark-operator.yaml deleted file mode 100644 index b4b31d1ad..000000000 --- a/manifest/spark-operator-install/spark-operator.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - strategy: - type: Recreate - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - containers: - - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: Always - args: - - -logtostderr diff --git a/manifest/spark-operator-with-webhook-install/kustomization.yaml b/manifest/spark-operator-with-webhook-install/kustomization.yaml deleted file mode 100644 index ec3b237e7..000000000 --- a/manifest/spark-operator-with-webhook-install/kustomization.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: spark-operator - -resources: - - ../spark-operator-install - - spark-operator-webhook.yaml - -patchesStrategicMerge: - - spark-operator-patch.yaml diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml deleted file mode 100644 index e752063c8..000000000 --- a/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - namespace: spark-operator -spec: - template: - spec: - volumes: - - name: webhook-certs - secret: - secretName: spark-webhook-certs - containers: - - name: sparkoperator - args: - - -logtostderr - - -enable-webhook=true - - -v=2 - volumeMounts: - - name: webhook-certs - mountPath: /etc/webhook-certs - diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml deleted file mode 100644 index eaad8660d..000000000 --- a/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: batch/v1 -kind: Job -metadata: - name: sparkoperator-init - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - backoffLimit: 3 - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - restartPolicy: Never - containers: - - name: main - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: IfNotPresent - command: ["/usr/bin/gencerts.sh", "-p"] ---- -kind: Service -apiVersion: v1 -metadata: - name: spark-webhook - namespace: spark-operator -spec: - ports: - - port: 443 - targetPort: 8080 - name: webhook - selector: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml deleted file mode 100644 index 5e4f31805..000000000 --- a/manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml +++ /dev/null @@ -1,96 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 - strategy: - type: Recreate - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 - initializers: - pending: [] - spec: - serviceAccountName: sparkoperator - volumes: - - name: webhook-certs - secret: - secretName: spark-webhook-certs - containers: - - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v2.4.0-v1beta1-latest - imagePullPolicy: Always - volumeMounts: - - name: webhook-certs - mountPath: /etc/webhook-certs - ports: - - containerPort: 8080 - args: - - -logtostderr - - -enable-webhook=true - - -v=2 ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: sparkoperator-init - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 -spec: - backoffLimit: 3 - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 - spec: - serviceAccountName: sparkoperator - restartPolicy: Never - containers: - - name: main - image: gcr.io/spark-operator/spark-operator:v2.4.0-v1beta1-latest - imagePullPolicy: IfNotPresent - command: ["/usr/bin/gencerts.sh", "-p"] ---- -kind: Service -apiVersion: v1 -metadata: - name: spark-webhook - namespace: spark-operator -spec: - ports: - - port: 443 - targetPort: 8080 - name: webhook - selector: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 diff --git a/pkg/batchscheduler/scheduler_manager.go b/pkg/batchscheduler/scheduler_manager.go deleted file mode 100644 index 41ff744b0..000000000 --- a/pkg/batchscheduler/scheduler_manager.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package batchscheduler - -import ( - "fmt" - "sync" - - "k8s.io/client-go/rest" - - "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" - "github.com/kubeflow/spark-operator/pkg/batchscheduler/volcano" -) - -type schedulerInitializeFunc func(config *rest.Config) (schedulerinterface.BatchScheduler, error) - -var schedulerContainers = map[string]schedulerInitializeFunc{ - volcano.GetPluginName(): volcano.New, -} - -func GetRegisteredNames() []string { - var pluginNames []string - for key := range schedulerContainers { - pluginNames = append(pluginNames, key) - } - return pluginNames -} - -type SchedulerManager struct { - sync.Mutex - config *rest.Config - plugins map[string]schedulerinterface.BatchScheduler -} - -func NewSchedulerManager(config *rest.Config) *SchedulerManager { - manager := SchedulerManager{ - config: config, - plugins: make(map[string]schedulerinterface.BatchScheduler), - } - return &manager -} - -func (batch *SchedulerManager) GetScheduler(schedulerName string) (schedulerinterface.BatchScheduler, error) { - iniFunc, registered := schedulerContainers[schedulerName] - if !registered { - return nil, fmt.Errorf("unregistered scheduler plugin %s", schedulerName) - } - - batch.Lock() - defer batch.Unlock() - - if plugin, existed := batch.plugins[schedulerName]; existed && plugin != nil { - return plugin, nil - } else if existed && plugin == nil { - return nil, fmt.Errorf( - "failed to get scheduler plugin %s, previous initialization has failed", schedulerName) - } else { - if plugin, err := iniFunc(batch.config); err != nil { - batch.plugins[schedulerName] = nil - return nil, err - } else { - batch.plugins[schedulerName] = plugin - return plugin, nil - } - } -} diff --git a/pkg/batchscheduler/volcano/volcano_scheduler.go b/pkg/batchscheduler/volcano/volcano_scheduler.go deleted file mode 100644 index a232784c7..000000000 --- a/pkg/batchscheduler/volcano/volcano_scheduler.go +++ /dev/null @@ -1,307 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package volcano - -import ( - "context" - "fmt" - - corev1 "k8s.io/api/core/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" - - "volcano.sh/apis/pkg/apis/scheduling/v1beta1" - volcanoclient "volcano.sh/apis/pkg/client/clientset/versioned" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - schedulerinterface "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" -) - -const ( - PodGroupName = "podgroups.scheduling.volcano.sh" -) - -type VolcanoBatchScheduler struct { - extensionClient apiextensionsclient.Interface - volcanoClient volcanoclient.Interface -} - -func GetPluginName() string { - return "volcano" -} - -func (v *VolcanoBatchScheduler) Name() string { - return GetPluginName() -} - -func (v *VolcanoBatchScheduler) ShouldSchedule(app *v1beta2.SparkApplication) bool { - //NOTE: There is no additional requirement for volcano scheduler - return true -} - -func (v *VolcanoBatchScheduler) DoBatchSchedulingOnSubmission(app *v1beta2.SparkApplication) error { - if app.Spec.Executor.Annotations == nil { - app.Spec.Executor.Annotations = make(map[string]string) - } - - if app.Spec.Driver.Annotations == nil { - app.Spec.Driver.Annotations = make(map[string]string) - } - - if app.Spec.Mode == v1beta2.ClientMode { - return v.syncPodGroupInClientMode(app) - } else if app.Spec.Mode == v1beta2.ClusterMode { - return v.syncPodGroupInClusterMode(app) - } - return nil -} - -func (v *VolcanoBatchScheduler) syncPodGroupInClientMode(app *v1beta2.SparkApplication) error { - // We only care about the executor pods in client mode - if _, ok := app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey]; !ok { - totalResource := getExecutorRequestResource(app) - - if app.Spec.BatchSchedulerOptions != nil && len(app.Spec.BatchSchedulerOptions.Resources) > 0 { - totalResource = app.Spec.BatchSchedulerOptions.Resources - } - if err := v.syncPodGroup(app, 1, totalResource); err == nil { - app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey] = v.getAppPodGroupName(app) - } else { - return err - } - } - return nil -} - -func (v *VolcanoBatchScheduler) syncPodGroupInClusterMode(app *v1beta2.SparkApplication) error { - //We need both mark Driver and Executor when submitting - //NOTE: In cluster mode, the initial size of PodGroup is set to 1 in order to schedule driver pod first. - if _, ok := app.Spec.Driver.Annotations[v1beta1.KubeGroupNameAnnotationKey]; !ok { - //Both driver and executor resource will be considered. 
- totalResource := sumResourceList([]corev1.ResourceList{getExecutorRequestResource(app), getDriverRequestResource(app)}) - - if app.Spec.BatchSchedulerOptions != nil && len(app.Spec.BatchSchedulerOptions.Resources) > 0 { - totalResource = app.Spec.BatchSchedulerOptions.Resources - } - if err := v.syncPodGroup(app, 1, totalResource); err == nil { - app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey] = v.getAppPodGroupName(app) - app.Spec.Driver.Annotations[v1beta1.KubeGroupNameAnnotationKey] = v.getAppPodGroupName(app) - } else { - return err - } - } - return nil -} - -func (v *VolcanoBatchScheduler) getAppPodGroupName(app *v1beta2.SparkApplication) string { - return fmt.Sprintf("spark-%s-pg", app.Name) -} - -func (v *VolcanoBatchScheduler) syncPodGroup(app *v1beta2.SparkApplication, size int32, minResource corev1.ResourceList) error { - var ( - err error - pg *v1beta1.PodGroup - ) - podGroupName := v.getAppPodGroupName(app) - if pg, err = v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Get(context.TODO(), podGroupName, metav1.GetOptions{}); err != nil { - if !errors.IsNotFound(err) { - return err - } - podGroup := v1beta1.PodGroup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: app.Namespace, - Name: podGroupName, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(app, v1beta2.SchemeGroupVersion.WithKind("SparkApplication")), - }, - }, - Spec: v1beta1.PodGroupSpec{ - MinMember: size, - MinResources: &minResource, - }, - Status: v1beta1.PodGroupStatus{ - Phase: v1beta1.PodGroupPending, - }, - } - - if app.Spec.BatchSchedulerOptions != nil { - //Update pod group queue if it's specified in Spark Application - if app.Spec.BatchSchedulerOptions.Queue != nil { - podGroup.Spec.Queue = *app.Spec.BatchSchedulerOptions.Queue - } - //Update pod group priorityClassName if it's specified in Spark Application - if app.Spec.BatchSchedulerOptions.PriorityClassName != nil { - podGroup.Spec.PriorityClassName = *app.Spec.BatchSchedulerOptions.PriorityClassName - } - } - _, err = v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Create(context.TODO(), &podGroup, metav1.CreateOptions{}) - } else { - if pg.Spec.MinMember != size { - pg.Spec.MinMember = size - _, err = v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Update(context.TODO(), pg, metav1.UpdateOptions{}) - } - } - if err != nil { - return fmt.Errorf("failed to sync PodGroup with error: %s. 
Abandon schedule pods via volcano", err) - } - return nil -} - -func (v *VolcanoBatchScheduler) CleanupOnCompletion(app *v1beta2.SparkApplication) error { - podGroupName := v.getAppPodGroupName(app) - err := v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Delete(context.TODO(), podGroupName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - return err - } - return nil -} - -func New(config *rest.Config) (schedulerinterface.BatchScheduler, error) { - vkClient, err := volcanoclient.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("failed to initialize volcano client with error %v", err) - } - extClient, err := apiextensionsclient.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("failed to initialize k8s extension client with error %v", err) - } - - if _, err := extClient.ApiextensionsV1().CustomResourceDefinitions().Get( - context.TODO(), - PodGroupName, - metav1.GetOptions{}, - ); err != nil { - //For backward compatibility check v1beta1 API version of CustomResourceDefinitions - if _, err := extClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get( - context.TODO(), - PodGroupName, - metav1.GetOptions{}, - ); err != nil { - return nil, fmt.Errorf("podGroup CRD is required to exists in current cluster error: %s", err) - } - } - return &VolcanoBatchScheduler{ - extensionClient: extClient, - volcanoClient: vkClient, - }, nil -} - -func getExecutorRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList { - minResource := corev1.ResourceList{} - - //CoreRequest correspond to executor's core request - if app.Spec.Executor.CoreRequest != nil { - if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreRequest); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - - //Use Core attribute if CoreRequest is empty - if app.Spec.Executor.Cores != nil { - if _, ok := minResource[corev1.ResourceCPU]; !ok { - if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Executor.Cores)); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - } - - //CoreLimit correspond to executor's core limit, this attribute will be used only when core request is empty. 
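// (Sketch of the precedence implemented here: the executor CPU request
// resolves as CoreRequest, else Cores, else CoreLimit. For example,
// CoreRequest="500m" wins over Cores=2; with only Cores=2 set, the request
// parses to resource.MustParse("2").)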
- if app.Spec.Executor.CoreLimit != nil { - if _, ok := minResource[corev1.ResourceCPU]; !ok { - if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreLimit); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - } - - //Memory + MemoryOverhead correspond to executor's memory request - if app.Spec.Executor.Memory != nil { - if value, err := resource.ParseQuantity(*app.Spec.Executor.Memory); err == nil { - minResource[corev1.ResourceMemory] = value - } - } - if app.Spec.Executor.MemoryOverhead != nil { - if value, err := resource.ParseQuantity(*app.Spec.Executor.MemoryOverhead); err == nil { - if existing, ok := minResource[corev1.ResourceMemory]; ok { - existing.Add(value) - minResource[corev1.ResourceMemory] = existing - } - } - } - - resourceList := []corev1.ResourceList{{}} - for i := int32(0); i < *app.Spec.Executor.Instances; i++ { - resourceList = append(resourceList, minResource) - } - return sumResourceList(resourceList) -} - -func getDriverRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList { - minResource := corev1.ResourceList{} - - //Cores correspond to driver's core request - if app.Spec.Driver.Cores != nil { - if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Driver.Cores)); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - - //CoreLimit correspond to driver's core limit, this attribute will be used only when core request is empty. - if app.Spec.Driver.CoreLimit != nil { - if _, ok := minResource[corev1.ResourceCPU]; !ok { - if value, err := resource.ParseQuantity(*app.Spec.Driver.CoreLimit); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - } - - //Memory + MemoryOverhead correspond to driver's memory request - if app.Spec.Driver.Memory != nil { - if value, err := resource.ParseQuantity(*app.Spec.Driver.Memory); err == nil { - minResource[corev1.ResourceMemory] = value - } - } - if app.Spec.Driver.MemoryOverhead != nil { - if value, err := resource.ParseQuantity(*app.Spec.Driver.MemoryOverhead); err == nil { - if existing, ok := minResource[corev1.ResourceMemory]; ok { - existing.Add(value) - minResource[corev1.ResourceMemory] = existing - } - } - } - - return minResource -} - -func sumResourceList(list []corev1.ResourceList) corev1.ResourceList { - totalResource := corev1.ResourceList{} - for _, l := range list { - for name, quantity := range l { - - if value, ok := totalResource[name]; !ok { - totalResource[name] = quantity.DeepCopy() - } else { - value.Add(quantity) - totalResource[name] = value - } - } - } - return totalResource -} diff --git a/pkg/certificate/certificate.go b/pkg/certificate/certificate.go new file mode 100644 index 000000000..322faeb1e --- /dev/null +++ b/pkg/certificate/certificate.go @@ -0,0 +1,307 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package certificate
+
+import (
+	"context"
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"net"
+	"os"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/cert"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/kubeflow/spark-operator/pkg/common"
+)
+
+const (
+	Organization = "spark-operator"
+)
+
+// Provider is a container of an X509 certificate and a corresponding key for the
+// webhook server, and a CA certificate for the API server to verify the server certificate.
+type Provider struct {
+	client     client.Client
+	commonName string
+	caKey      *rsa.PrivateKey
+	caCert     *x509.Certificate
+	serverKey  *rsa.PrivateKey
+	serverCert *x509.Certificate
+}
+
+// NewProvider creates a new Provider instance.
+func NewProvider(client client.Client, name, namespace string) *Provider {
+	commonName := fmt.Sprintf("%s.%s.svc", name, namespace)
+	certProvider := Provider{
+		client:     client,
+		commonName: commonName,
+	}
+	return &certProvider
+}
+
+// SyncSecret syncs the secret containing the certificates to the given name and namespace.
+func (cp *Provider) SyncSecret(ctx context.Context, name, namespace string) error {
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+	}
+	key := types.NamespacedName{
+		Name:      name,
+		Namespace: namespace,
+	}
+	if err := cp.client.Get(ctx, key, secret); err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+		if err := cp.client.Create(ctx, secret); err != nil {
+			if errors.IsAlreadyExists(err) {
+				return err
+			}
+			return fmt.Errorf("failed to create secret: %v", err)
+		}
+	}
+
+	if len(secret.Data[common.CAKeyPem]) == 0 ||
+		len(secret.Data[common.CACertPem]) == 0 ||
+		len(secret.Data[common.ServerCertPem]) == 0 ||
+		len(secret.Data[common.ServerKeyPem]) == 0 {
+		if err := cp.Generate(); err != nil {
+			return fmt.Errorf("failed to generate certificate: %v", err)
+		}
+		if err := cp.updateSecret(ctx, secret); err != nil {
+			return err
+		}
+		return nil
+	}
+	return cp.parseSecret(secret)
+}
+
+// CAKey returns the PEM-encoded CA private key.
+func (cp *Provider) CAKey() ([]byte, error) {
+	if cp.caKey == nil {
+		return nil, fmt.Errorf("CA key is not set")
+	}
+	data := pem.EncodeToMemory(&pem.Block{
+		Type:  "RSA PRIVATE KEY",
+		Bytes: x509.MarshalPKCS1PrivateKey(cp.caKey),
+	})
+	return data, nil
+}
+
+// CACert returns the PEM-encoded CA certificate.
+func (cp *Provider) CACert() ([]byte, error) {
+	if cp.caCert == nil {
+		return nil, fmt.Errorf("CA certificate is not set")
+	}
+	data := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: cp.caCert.Raw,
+	})
+	return data, nil
+}
+
+// ServerKey returns the PEM-encoded server private key.
+func (cp *Provider) ServerKey() ([]byte, error) {
+	if cp.serverKey == nil {
+		return nil, fmt.Errorf("server key is not set")
+	}
+	data := pem.EncodeToMemory(&pem.Block{
+		Type:  "RSA PRIVATE KEY",
+		Bytes: x509.MarshalPKCS1PrivateKey(cp.serverKey),
+	})
+	return data, nil
+}
+
+// ServerCert returns the PEM-encoded server cert.
+func (cp *Provider) ServerCert() ([]byte, error) {
+	if cp.serverCert == nil {
+		return nil, fmt.Errorf("server cert is not set")
+	}
+	data := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: cp.serverCert.Raw,
+	})
+	return data, nil
+}
+
+// TLSConfig returns the TLS configuration.
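// For illustration (a hypothetical call site, not part of this patch): the
// returned config can back the webhook's HTTPS listener, e.g.
//
//	cfg, err := cp.TLSConfig()
//	if err != nil {
//		return err
//	}
//	server := &http.Server{Addr: ":9443", TLSConfig: cfg} // the port is an assumption
//	return server.ListenAndServeTLS("", "")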
+func (cp *Provider) TLSConfig() (*tls.Config, error) {
+	keyPEMBlock, err := cp.ServerKey()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get server key: %v", err)
+	}
+
+	certPEMBlock, err := cp.ServerCert()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get server certificate: %v", err)
+	}
+
+	tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate TLS certificate: %v", err)
+	}
+
+	cfg := &tls.Config{
+		Certificates: []tls.Certificate{tlsCert},
+	}
+	return cfg, nil
+}
+
+// WriteFile saves the certificate and key to the given path.
+func (cp *Provider) WriteFile(path, certName, keyName string) error {
+	if err := os.MkdirAll(path, 0755); err != nil {
+		return err
+	}
+	serverCert, err := cp.ServerCert()
+	if err != nil {
+		return err
+	}
+	serverKey, err := cp.ServerKey()
+	if err != nil {
+		return err
+	}
+	if err := os.WriteFile(path+"/"+certName, serverCert, 0600); err != nil {
+		return err
+	}
+	if err := os.WriteFile(path+"/"+keyName, serverKey, 0600); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cp *Provider) Generate() error {
+	// Generate CA private key
+	caKey, err := NewPrivateKey()
+	if err != nil {
+		return fmt.Errorf("failed to generate CA private key: %v", err)
+	}
+
+	// Generate self-signed CA certificate
+	caCfg := cert.Config{
+		CommonName:   cp.commonName,
+		Organization: []string{Organization},
+	}
+	caCert, err := cert.NewSelfSignedCACert(caCfg, caKey)
+	if err != nil {
+		return fmt.Errorf("failed to generate self-signed CA certificate: %v", err)
+	}
+
+	// Generate server private key
+	serverKey, err := NewPrivateKey()
+	if err != nil {
+		return fmt.Errorf("failed to generate server private key: %v", err)
+	}
+
+	// Generate signed server certificate
+	var ips []net.IP
+	dnsNames := []string{"localhost"}
+	hostIP := net.ParseIP(cp.commonName)
+	if hostIP.To4() != nil {
+		ips = append(ips, hostIP.To4())
+	} else {
+		dnsNames = append(dnsNames, cp.commonName)
+	}
+	serverCfg := cert.Config{
+		CommonName:   cp.commonName,
+		Organization: []string{Organization},
+		AltNames:     cert.AltNames{IPs: ips, DNSNames: dnsNames},
+	}
+	serverCert, err := NewSignedServerCert(serverCfg, caKey, caCert, serverKey)
+	if err != nil {
+		return fmt.Errorf("failed to generate signed server certificate: %v", err)
+	}
+
+	cp.caKey = caKey
+	cp.caCert = caCert
+	cp.serverKey = serverKey
+	cp.serverCert = serverCert
+	return nil
+}
+
+func (cp *Provider) parseSecret(secret *corev1.Secret) error {
+	if secret == nil {
+		return fmt.Errorf("secret is nil")
+	}
+	caKeyPem, _ := pem.Decode(secret.Data[common.CAKeyPem])
+	caCertPem, _ := pem.Decode(secret.Data[common.CACertPem])
+	serverKeyPem, _ := pem.Decode(secret.Data[common.ServerKeyPem])
+	serverCertPem, _ := pem.Decode(secret.Data[common.ServerCertPem])
+	if caKeyPem == nil || caCertPem == nil || serverKeyPem == nil || serverCertPem == nil {
+		return fmt.Errorf("failed to decode secret data to pem block")
+	}
+	caKey, err := x509.ParsePKCS1PrivateKey(caKeyPem.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to parse CA private key: %v", err)
+	}
+	caCert, err := x509.ParseCertificate(caCertPem.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to parse CA certificate: %v", err)
+	}
+	serverKey, err := x509.ParsePKCS1PrivateKey(serverKeyPem.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to parse server private key: %v", err)
+	}
+	serverCert, err := x509.ParseCertificate(serverCertPem.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to 
parse server certificate: %v", err) + } + cp.caKey = caKey + cp.caCert = caCert + cp.serverKey = serverKey + cp.serverCert = serverCert + return nil +} + +func (cp *Provider) updateSecret(ctx context.Context, secret *corev1.Secret) error { + caKey, err := cp.CAKey() + if err != nil { + return fmt.Errorf("failed to get CA key: %v", err) + } + caCert, err := cp.CACert() + if err != nil { + return fmt.Errorf("failed to get CA certificate: %v", err) + } + serverKey, err := cp.ServerKey() + if err != nil { + return fmt.Errorf("failed to get server key: %v", err) + } + serverCert, err := cp.ServerCert() + if err != nil { + return fmt.Errorf("failed to get server certificate: %v", err) + } + if secret.Data == nil { + secret.Data = make(map[string][]byte) + } + secret.Data[common.CAKeyPem] = caKey + secret.Data[common.CACertPem] = caCert + secret.Data[common.ServerKeyPem] = serverKey + secret.Data[common.ServerCertPem] = serverCert + if err := cp.client.Update(ctx, secret); err != nil { + return err + } + return nil +} diff --git a/pkg/certificate/certificate_test.go b/pkg/certificate/certificate_test.go new file mode 100644 index 000000000..f0f416279 --- /dev/null +++ b/pkg/certificate/certificate_test.go @@ -0,0 +1,175 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate_test + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/kubeflow/spark-operator/pkg/certificate" + "github.com/kubeflow/spark-operator/pkg/common" +) + +var _ = Describe("Certificate Provider", func() { + Context("Generate new certificates", func() { + secretName := "spark-operator-webhook-secret" + secretNamespace := "default" + + var cp *certificate.Provider + + BeforeEach(func() { + By("Creating a new cert provider") + cp = certificate.NewProvider(k8sClient, secretName, secretNamespace) + Expect(cp).NotTo(BeNil()) + + By("Generating new certificates") + Expect(cp.Generate()).To(Succeed()) + }) + + It("Should generate new CA key", func() { + caKey, err := cp.CAKey() + Expect(err).To(BeNil()) + Expect(caKey).NotTo(BeEmpty()) + }) + + It("Should generate new CA certificate", func() { + caCert, err := cp.CACert() + Expect(err).To(BeNil()) + Expect(caCert).NotTo(BeEmpty()) + }) + + It("Should generate new server key", func() { + serverKey, err := cp.ServerKey() + Expect(err).To(BeNil()) + Expect(serverKey).NotTo(BeEmpty()) + }) + + It("Should generate new server certificate", func() { + serverCert, err := cp.ServerCert() + Expect(err).To(BeNil()) + Expect(serverCert).NotTo(BeEmpty()) + }) + + It("Should generate new TLS config", func() { + cfg, err := cp.ServerCert() + Expect(err).To(BeNil()) + Expect(cfg).NotTo(BeEmpty()) + }) + }) + + Context("The data of webhook secret is empty", func() { + ctx := context.Background() + secretName := "spark-operator-webhook-secret" + secretNamespace := "default" + key := types.NamespacedName{ + Namespace: secretNamespace, + Name: secretName, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNamespace, + }, + } + + BeforeEach(func() { + By("Creating a new webhook secret with empty data") + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + }) + + AfterEach(func() { + By("Deleting the webhook secret") + Expect(k8sClient.Delete(ctx, secret)).To(Succeed()) + }) + + It("Should generate new certificates and update webhook secret", func() { + By("Creating a new CertProvider") + cp := certificate.NewProvider(k8sClient, secretName, secretNamespace) + Expect(cp.SyncSecret(context.TODO(), secretName, secretNamespace)).To(Succeed()) + + By("Checking out whether the data of webhook secret is populated") + Expect(k8sClient.Get(ctx, key, secret)).To(Succeed()) + Expect(secret.Data[common.CAKeyPem]).NotTo(BeEmpty()) + Expect(secret.Data[common.CACertPem]).NotTo(BeEmpty()) + Expect(secret.Data[common.ServerKeyPem]).NotTo(BeEmpty()) + Expect(secret.Data[common.ServerCertPem]).NotTo(BeEmpty()) + }) + }) + + Context("The data of webhook secret is already populated", func() { + ctx := context.Background() + secretName := "spark-operator-webhook-secret" + secretNamespace := "default" + key := types.NamespacedName{ + Name: secretName, + Namespace: secretNamespace, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNamespace, + }, + } + + BeforeEach(func() { + By("Creating a new webhook secret with data populated") + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + + By("Creating a new CertProvider and synchronize generated certificates to webhook secret") + cp := certificate.NewProvider(k8sClient, secretName, secretNamespace) + Expect(cp.SyncSecret(context.TODO(), secretName, secretNamespace)).To(Succeed()) + + By("Creating a new webhook secret with data populated") + 
Expect(k8sClient.Get(ctx, key, secret)).To(Succeed())
+			Expect(secret.Data[common.CAKeyPem]).NotTo(BeEmpty())
+			Expect(secret.Data[common.CACertPem]).NotTo(BeEmpty())
+			Expect(secret.Data[common.ServerKeyPem]).NotTo(BeEmpty())
+			Expect(secret.Data[common.ServerCertPem]).NotTo(BeEmpty())
+		})
+
+		AfterEach(func() {
+			By("Deleting the webhook secret")
+			Expect(k8sClient.Delete(ctx, secret)).To(Succeed())
+		})
+
+		It("Should synchronize webhook certificate data", func() {
+			By("Creating a new cert provider and synchronizing generated certificates to the webhook secret")
+			cp := certificate.NewProvider(k8sClient, secretName, secretNamespace)
+			Expect(cp.SyncSecret(context.TODO(), secretName, secretNamespace)).To(Succeed())
+
+			By("Checking whether the webhook certificates are synchronized into the cert provider")
+			caKey, err := cp.CAKey()
+			Expect(err).To(BeNil())
+			Expect(caKey).To(Equal(secret.Data[common.CAKeyPem]))
+			caCert, err := cp.CACert()
+			Expect(err).To(BeNil())
+			Expect(caCert).To(Equal(secret.Data[common.CACertPem]))
+			serverKey, err := cp.ServerKey()
+			Expect(err).To(BeNil())
+			Expect(serverKey).To(Equal(secret.Data[common.ServerKeyPem]))
+			serverCert, err := cp.ServerCert()
+			Expect(err).To(BeNil())
+			Expect(serverCert).To(Equal(secret.Data[common.ServerCertPem]))
+		})
+	})
+})
diff --git a/pkg/apis/sparkoperator.k8s.io/register.go b/pkg/certificate/doc.go
similarity index 84%
rename from pkg/apis/sparkoperator.k8s.io/register.go
rename to pkg/certificate/doc.go
index 65762067b..082248576 100644
--- a/pkg/apis/sparkoperator.k8s.io/register.go
+++ b/pkg/certificate/doc.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,8 +14,4 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package sparkoperator
-
-const (
-	GroupName = "sparkoperator.k8s.io"
-)
+package certificate
diff --git a/pkg/certificate/suite_test.go b/pkg/certificate/suite_test.go
new file mode 100644
index 000000000..96fad9a2a
--- /dev/null
+++ b/pkg/certificate/suite_test.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certificate_test
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/kubeflow/spark-operator/api/v1beta1"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
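// As a rough usage sketch (assumed, not part of this patch): with envtest
// started and k8sClient built below, specs can exercise the CRDs directly
// (a real spec would also import "context"), e.g.
//
//	var _ = It("lists SparkApplications", func() {
//		list := &v1beta2.SparkApplicationList{}
//		Expect(k8sClient.List(context.TODO(), list)).To(Succeed())
//	})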
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+
+func TestCertProvider(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Certificate Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+
+		// The BinaryAssetsDirectory is only required if you want to run the tests directly
+		// without calling the makefile target test. If not set, it will look for the
+		// default path defined in controller-runtime, which is /usr/local/kubebuilder/.
+		// Note that you must have the required binaries set up under the bin directory to
+		// run the tests directly. When run via make test, they are set up and used automatically.
+		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
+			fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)),
+	}
+
+	var err error
+	// cfg is defined in this file globally.
+	cfg, err = testEnv.Start()
+	Expect(err).NotTo(HaveOccurred())
+	Expect(cfg).NotTo(BeNil())
+
+	err = v1beta2.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = v1beta1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	// +kubebuilder:scaffold:scheme
+
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(k8sClient).NotTo(BeNil())
+
+})
+
+var _ = AfterSuite(func() {
+	By("tearing down the test environment")
+	err := testEnv.Stop()
+	Expect(err).NotTo(HaveOccurred())
+})
diff --git a/pkg/util/cert.go b/pkg/certificate/util.go
similarity index 72%
rename from pkg/util/cert.go
rename to pkg/certificate/util.go
index 37188f3a3..635c89dde 100644
--- a/pkg/util/cert.go
+++ b/pkg/certificate/util.go
@@ -1,4 +1,20 @@
-package util
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certificate
 
 import (
 	"crypto/rand"
@@ -11,14 +27,12 @@ import (
 	"time"
 
 	"k8s.io/client-go/util/cert"
-)
 
-const (
-	RSAKeySize = 2048
+	"github.com/kubeflow/spark-operator/pkg/common"
 )
 
 func NewPrivateKey() (*rsa.PrivateKey, error) {
-	key, err := rsa.GenerateKey(rand.Reader, RSAKeySize)
+	key, err := rsa.GenerateKey(rand.Reader, common.RSAKeySize)
 	if err != nil {
 		return nil, fmt.Errorf("failed to generate private key: %v", err)
 	}
diff --git a/pkg/certificate/util_test.go b/pkg/certificate/util_test.go
new file mode 100644
index 000000000..d7a24e7dd
--- /dev/null
+++ b/pkg/certificate/util_test.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate_test + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "testing" + "time" + + "k8s.io/client-go/util/cert" + + "github.com/kubeflow/spark-operator/pkg/certificate" + "github.com/kubeflow/spark-operator/pkg/common" +) + +func TestNewPrivateKey(t *testing.T) { + _, err := certificate.NewPrivateKey() + if err != nil { + t.Errorf("failed to generate private key: %v", err) + } +} + +func TestNewSignedServerCert(t *testing.T) { + cfg := cert.Config{ + CommonName: "test-server", + Organization: []string{"test-org"}, + NotBefore: time.Now(), + } + + caKey, _ := rsa.GenerateKey(rand.Reader, common.RSAKeySize) + caCert := &x509.Certificate{} + serverKey, _ := rsa.GenerateKey(rand.Reader, common.RSAKeySize) + + serverCert, err := certificate.NewSignedServerCert(cfg, caKey, caCert, serverKey) + if err != nil { + t.Errorf("failed to generate signed server certificate: %v", err) + } + + if serverCert == nil { + t.Error("server certificate is nil") + } +} diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index 2ba94243f..6fd11d33c 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -21,8 +21,8 @@ limitations under the License. package fake import ( - sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" + sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index d12cb60d4..d765130f1 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -21,8 +21,8 @@ limitations under the License. 
package scheme import ( - sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" + sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go index ac41f935c..270b95ac8 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go index e8772e7ba..d8b61c686 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go index 65336a68e..9447017aa 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go @@ -24,7 +24,7 @@ import ( "context" "time" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go index e4308e309..b63899134 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go @@ -24,7 +24,7 @@ import ( "context" "time" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go index c347da5be..016a05d27 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -74,7 +74,7 @@ func New(c rest.Interface) *SparkoperatorV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := v1beta1.GroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go index 6d2218ba4..be3cdec5e 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go index aa2a99439..b5bddecae 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go index 38b006368..2a1b6f883 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -24,7 +24,7 @@ import ( "context" "time" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git 
a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go index cc541f5dd..6e6e17a29 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -24,7 +24,7 @@ import ( "context" "time" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go index cb8dc2073..6d9b3ae85 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 6992de7df..0e952e11e 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -23,8 +23,8 @@ package externalversions import ( "fmt" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -56,15 +56,15 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=sparkoperator.k8s.io, Version=v1beta1 - case v1beta1.SchemeGroupVersion.WithResource("scheduledsparkapplications"): + case v1beta1.GroupVersion.WithResource("scheduledsparkapplications"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta1().ScheduledSparkApplications().Informer()}, nil - case v1beta1.SchemeGroupVersion.WithResource("sparkapplications"): + case v1beta1.GroupVersion.WithResource("sparkapplications"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta1().SparkApplications().Informer()}, nil // Group=sparkoperator.k8s.io, Version=v1beta2 - case v1beta2.SchemeGroupVersion.WithResource("scheduledsparkapplications"): + case v1beta2.GroupVersion.WithResource("scheduledsparkapplications"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta2().ScheduledSparkApplications().Informer()}, nil - case v1beta2.SchemeGroupVersion.WithResource("sparkapplications"): + case v1beta2.GroupVersion.WithResource("sparkapplications"): return &genericInformer{resource: resource.GroupResource(), 
informer: f.Sparkoperator().V1beta2().SparkApplications().Informer()}, nil } diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go index 46de9ba52..78564956b 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go @@ -24,7 +24,7 @@ import ( "context" time "time" - sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces" v1beta1 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1" diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go index f38734232..b33dd91ea 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go @@ -24,7 +24,7 @@ import ( "context" time "time" - sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces" v1beta1 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1" diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go index ffa1fddd3..6c0a0ac99 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -24,7 +24,7 @@ import ( "context" time "time" - sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces" v1beta2 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go index da42c12ec..5ad478876 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -24,7 +24,7 @@ import ( "context" time "time" - sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" 
versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces" v1beta2 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go index f3921e810..b0058373b 100644 --- a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go index 51ceafa4d..9afc432f5 100644 --- a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go index f70331d5b..c4d9faa0c 100644 --- a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go index 4818a3cf7..95cee753f 100644 --- a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/common/constants.go b/pkg/common/constants.go new file mode 100644 index 000000000..59ffc8708 --- /dev/null +++ b/pkg/common/constants.go @@ -0,0 +1,50 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +const ( + ErrorCodePodAlreadyExists = "code=409" +) + +const ( + SparkApplicationFinalizerName = "sparkoperator.k8s.io/finalizer" + ScheduledSparkApplicationFinalizerName = "sparkoperator.k8s.io/finalizer" +) + +const ( + RSAKeySize = 2048 +) + +const ( + CAKeyPem = "ca-key.pem" + CACertPem = "ca-cert.pem" + ServerKeyPem = "server-key.pem" + ServerCertPem = "server-cert.pem" +) + +// Kubernetes volume types. +const ( + VolumeTypeEmptyDir = "emptyDir" + VolumeTypeHostPath = "hostPath" + VolumeTypeNFS = "nfs" + VolumeTypePersistentVolumeClaim = "persistentVolumeClaim" +) + +const ( + // Epsilon is a small number used to compare 64 bit floating point numbers. + Epsilon = 1e-9 +) diff --git a/pkg/config/doc.go b/pkg/common/doc.go similarity index 89% rename from pkg/config/doc.go rename to pkg/common/doc.go index f5c09720c..4ff7b522b 100644 --- a/pkg/config/doc.go +++ b/pkg/common/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package config +package common -// Package config contains code that deals with configuration of Spark driver and executor pods, e.g., mounting +// Package common contains code that deals with configuration of Spark driver and executor pods, e.g., mounting // user-specified ConfigMaps, volumes, secrets, etc. diff --git a/pkg/common/event.go b/pkg/common/event.go new file mode 100644 index 000000000..0c469ce92 --- /dev/null +++ b/pkg/common/event.go @@ -0,0 +1,58 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +// SparkApplication events +const ( + EventSparkApplicationAdded = "SparkApplicationAdded" + + EventSparkApplicationSubmitted = "SparkApplicationSubmitted" + + EventSparkApplicationSubmissionFailed = "SparkApplicationSubmissionFailed" + + EventSparkApplicationCompleted = "SparkApplicationCompleted" + + EventSparkApplicationFailed = "SparkApplicationFailed" + + EventSparkApplicationPendingRerun = "SparkApplicationPendingRerun" +) + +// Spark driver events +const ( + EventSparkDriverPending = "SparkDriverPending" + + EventSparkDriverRunning = "SparkDriverRunning" + + EventSparkDriverCompleted = "SparkDriverCompleted" + + EventSparkDriverFailed = "SparkDriverFailed" + + EventSparkDriverUnknown = "SparkDriverUnknown" +) + +// Spark executor events +const ( + EventSparkExecutorPending = "SparkExecutorPending" + + EventSparkExecutorRunning = "SparkExecutorRunning" + + EventSparkExecutorCompleted = "SparkExecutorCompleted" + + EventSparkExecutorFailed = "SparkExecutorFailed" + + EventSparkExecutorUnknown = "SparkExecutorUnknown" +) diff --git a/pkg/common/metrics.go b/pkg/common/metrics.go new file mode 100644 index 000000000..7e38dd7ba --- /dev/null +++ b/pkg/common/metrics.go @@ -0,0 +1,49 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +// Spark application metric names. +const ( + MetricSparkApplicationCount = "spark_application_count" + + MetricSparkApplicationSubmitCount = "spark_application_submit_count" + + MetricSparkApplicationFailedSubmissionCount = "spark_application_failed_submission_count" + + MetricSparkApplicationRunningCount = "spark_application_running_count" + + MetricSparkApplicationSuccessCount = "spark_application_success_count" + + MetricSparkApplicationFailureCount = "spark_application_failure_count" + + MetricSparkApplicationSuccessExecutionTimeSeconds = "spark_application_success_execution_time_seconds" + + MetricSparkApplicationFailureExecutionTimeSeconds = "spark_application_failure_execution_time_seconds" + + MetricSparkApplicationStartLatencySeconds = "spark_application_start_latency_seconds" + + MetricSparkApplicationStartLatencySecondsHistogram = "spark_application_start_latency_seconds_histogram" +) + +// Spark executor metric names. +const ( + MetricSparkExecutorRunningCount = "spark_executor_running_count" + + MetricSparkExecutorSuccessCount = "spark_executor_success_count" + + MetricSparkExecutorFailureCount = "spark_executor_failure_count" +) diff --git a/pkg/common/prometheus.go b/pkg/common/prometheus.go new file mode 100644 index 000000000..2e141f327 --- /dev/null +++ b/pkg/common/prometheus.go @@ -0,0 +1,139 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+const (
+	// PrometheusConfigMapNameSuffix is the name suffix of the Prometheus ConfigMap.
+	PrometheusConfigMapNameSuffix = "prom-conf"
+
+	// PrometheusConfigMapMountPath is the mount path of the Prometheus ConfigMap.
+	PrometheusConfigMapMountPath = "/etc/metrics/conf"
+)
+
+const (
+	MetricsPropertiesKey       = "metrics.properties"
+	PrometheusConfigKey        = "prometheus.yaml"
+	PrometheusScrapeAnnotation = "prometheus.io/scrape"
+	PrometheusPortAnnotation   = "prometheus.io/port"
+	PrometheusPathAnnotation   = "prometheus.io/path"
+)
+
+// DefaultMetricsProperties is the default content of metrics.properties.
+const DefaultMetricsProperties = `
+*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink
+driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource`
+
+// DefaultPrometheusConfiguration is the default content of prometheus.yaml.
+const DefaultPrometheusConfiguration = `
+lowercaseOutputName: true
+attrNameSnakeCase: true
+rules:
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.(BlockManager|DAGScheduler|jvm)\.(\S+)><>Value
+    name: spark_driver_$3_$4
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.(\S+)\.StreamingMetrics\.streaming\.(\S+)><>Value
+    name: spark_streaming_driver_$4
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.spark\.streaming\.(\S+)\.(\S+)><>Value
+    name: spark_structured_streaming_driver_$4
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      query_name: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.(\S+)\.executor\.(\S+)><>Value
+    name: spark_executor_$4
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.DAGScheduler\.(.*)><>Count
+    name: spark_driver_DAGScheduler_$3_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.HiveExternalCatalog\.(.*)><>Count
+    name: spark_driver_HiveExternalCatalog_$3_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.CodeGenerator\.(.*)><>Count
+    name: spark_driver_CodeGenerator_$3_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.LiveListenerBus\.(.*)><>Count
+    name: spark_driver_LiveListenerBus_$3_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.LiveListenerBus\.(.*)><>Value
+    name: spark_driver_LiveListenerBus_$3
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.(\S+)\.executor\.(.*)><>Count
+    name: spark_executor_$4_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.(\S+)\.(jvm|NettyBlockTransfer)\.(.*)><>Value
+    name: spark_executor_$4_$5
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.(\S+)\.HiveExternalCatalog\.(.*)><>Count
+    name: spark_executor_HiveExternalCatalog_$4_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.(\S+)\.CodeGenerator\.(.*)><>Count
+    name: spark_executor_CodeGenerator_$4_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+`
+
+// DefaultPrometheusJavaAgentPort is the default port used by the Prometheus JMX exporter.
+const DefaultPrometheusJavaAgentPort int32 = 8090
+
+// DefaultPrometheusPortProtocol is the default protocol used by the Prometheus JMX exporter.
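// For orientation (a sketch; the actual wiring lives elsewhere and the jar
// path is an assumption): these defaults are typically combined into the JMX
// exporter's -javaagent flag for the driver and executor JVMs, e.g.
//
//	javaAgent := fmt.Sprintf("-javaagent:%s=%d:%s/%s",
//		jmxExporterJarPath,             // hypothetical path to jmx_prometheus_javaagent.jar
//		DefaultPrometheusJavaAgentPort, // 8090
//		PrometheusConfigMapMountPath,   // /etc/metrics/conf
//		PrometheusConfigKey)            // prometheus.yaml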
+const DefaultPrometheusPortProtocol string = "TCP"
+
+// DefaultPrometheusPortName is the default port name used by the Prometheus JMX exporter.
+const DefaultPrometheusPortName string = "jmx-exporter"
diff --git a/pkg/common/spark.go b/pkg/common/spark.go
new file mode 100644
index 000000000..3c53c5fe6
--- /dev/null
+++ b/pkg/common/spark.go
@@ -0,0 +1,370 @@
+/*
+Copyright 2017 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Spark environment variables.
+const (
+	EnvSparkHome = "SPARK_HOME"
+
+	EnvKubernetesServiceHost = "KUBERNETES_SERVICE_HOST"
+
+	EnvKubernetesServicePort = "KUBERNETES_SERVICE_PORT"
+)
+
+// Spark properties.
+const (
+	// SparkAppName is the configuration property for application name.
+	SparkAppName = "spark.app.name"
+
+	SparkDriverCores = "spark.driver.cores"
+
+	SparkDriverMemory = "spark.driver.memory"
+
+	SparkDriverMemoryOverhead = "spark.driver.memoryOverhead"
+
+	SparkExecutorInstances = "spark.executor.instances"
+
+	SparkExecutorEnvTemplate = "spark.executor.env.%s"
+
+	SparkExecutorCores = "spark.executor.cores"
+
+	SparkExecutorMemory = "spark.executor.memory"
+
+	SparkExecutorMemoryOverhead = "spark.executor.memoryOverhead"
+
+	SparkUIProxyBase = "spark.ui.proxyBase"
+
+	SparkUIProxyRedirectURI = "spark.ui.proxyRedirectUri"
+)
+
+// Spark on Kubernetes properties.
+const (
+
+	// SparkKubernetesDriverMaster is the Spark configuration key for specifying the Kubernetes master the driver uses
+	// to manage executor pods and other Kubernetes resources.
+	SparkKubernetesDriverMaster = "spark.kubernetes.driver.master"
+
+	// SparkKubernetesNamespace is the configuration property for application namespace.
+	SparkKubernetesNamespace = "spark.kubernetes.namespace"
+
+	// SparkKubernetesContainerImage is the configuration property for specifying the unified container image.
+	SparkKubernetesContainerImage = "spark.kubernetes.container.image"
+
+	// SparkKubernetesContainerImagePullPolicy is the configuration property for specifying the container image pull policy.
+	SparkKubernetesContainerImagePullPolicy = "spark.kubernetes.container.image.pullPolicy"
+
+	// SparkKubernetesContainerImagePullSecrets is the configuration property for specifying the comma-separated list of image-pull
+	// secrets.
+	SparkKubernetesContainerImagePullSecrets = "spark.kubernetes.container.image.pullSecrets"
+
+	SparkKubernetesAllocationBatchSize = "spark.kubernetes.allocation.batch.size"
+
+	SparkKubernetesAllocationBatchDelay = "spark.kubernetes.allocation.batch.delay"
+
+	// SparkKubernetesAuthenticateDriverServiceAccountName is the Spark configuration key for specifying the name of the Kubernetes service
+	// account used by the driver pod.
+	SparkKubernetesAuthenticateDriverServiceAccountName = "spark.kubernetes.authenticate.driver.serviceAccountName"
+
+	// SparkKubernetesAuthenticateExecutorServiceAccountName is the Spark configuration key for specifying the name of the Kubernetes service
+	// account used by the executor pod.
+	SparkKubernetesAuthenticateExecutorServiceAccountName = "spark.kubernetes.authenticate.executor.serviceAccountName"
+
+	// SparkKubernetesDriverLabelTemplate is the Spark configuration key template for labels on the driver Pod.
+	SparkKubernetesDriverLabelTemplate = "spark.kubernetes.driver.label.%s"
+
+	// SparkKubernetesDriverAnnotationTemplate is the Spark configuration key template for annotations on the driver Pod.
+	SparkKubernetesDriverAnnotationTemplate = "spark.kubernetes.driver.annotation.%s"
+
+	// SparkKubernetesDriverServiceLabelTemplate is the key template of labels to be added to the driver service.
+	SparkKubernetesDriverServiceLabelTemplate = "spark.kubernetes.driver.service.label.%s"
+
+	// SparkKubernetesDriverServiceAnnotationTemplate is the key template of annotations to be added to the driver service.
+	SparkKubernetesDriverServiceAnnotationTemplate = "spark.kubernetes.driver.service.annotation.%s"
+
+	// SparkKubernetesExecutorLabelTemplate is the Spark configuration key template for labels on the executor Pods.
+	SparkKubernetesExecutorLabelTemplate = "spark.kubernetes.executor.label.%s"
+
+	// SparkKubernetesExecutorAnnotationTemplate is the Spark configuration key template for annotations on the executor Pods.
+	SparkKubernetesExecutorAnnotationTemplate = "spark.kubernetes.executor.annotation.%s"
+
+	// SparkKubernetesDriverPodName is the Spark configuration key for the driver pod name.
+	SparkKubernetesDriverPodName = "spark.kubernetes.driver.pod.name"
+
+	SparkKubernetesExecutorPodNamePrefix = "spark.kubernetes.executor.podNamePrefix"
+
+	// SparkKubernetesDriverRequestCores is the configuration property for specifying the physical CPU request for the driver.
+	SparkKubernetesDriverRequestCores = "spark.kubernetes.driver.request.cores"
+
+	// SparkKubernetesDriverLimitCores is the configuration property for specifying the hard CPU limit for the driver pod.
+	SparkKubernetesDriverLimitCores = "spark.kubernetes.driver.limit.cores"
+
+	// SparkKubernetesExecutorRequestCores is the configuration property for specifying the physical CPU request for executors.
+	SparkKubernetesExecutorRequestCores = "spark.kubernetes.executor.request.cores"
+
+	// SparkKubernetesExecutorLimitCores is the configuration property for specifying the hard CPU limit for the executor pods.
+	SparkKubernetesExecutorLimitCores = "spark.kubernetes.executor.limit.cores"
+
+	// SparkKubernetesNodeSelectorTemplate is the configuration property template for specifying node selectors for the pods.
+	SparkKubernetesNodeSelectorTemplate = "spark.kubernetes.node.selector.%s"
+
+	SparkKubernetesDriverNodeSelectorTemplate = "spark.kubernetes.driver.node.selector.%s"
+
+	SparkKubernetesExecutorNodeSelectorTemplate = "spark.kubernetes.executor.node.selector.%s"
+
+	// SparkKubernetesDriverEnvTemplate is the Spark configuration key template for setting environment variables
+	// in the driver.
+	SparkKubernetesDriverEnvTemplate = "spark.kubernetes.driverEnv.%s"
+
+	// SparkKubernetesDriverSecretsTemplate is the configuration property template for specifying secrets to be mounted into the
+	// driver.
+	SparkKubernetesDriverSecretsTemplate = "spark.kubernetes.driver.secrets.%s"
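+
+	// Editor's illustration (a sketch, not part of the original change): the secrets
+	// templates expand to mount requests, mirroring what the removed
+	// pkg/config/secret.go assembled for spark-submit. For a secret named "gcp-sa"
+	// to be mounted at "/etc/secrets":
+	//
+	//	props[fmt.Sprintf(SparkKubernetesDriverSecretsTemplate, "gcp-sa")] = "/etc/secrets"
+	//	// -> spark.kubernetes.driver.secrets.gcp-sa=/etc/secrets
+
+	// SparkKubernetesExecutorSecretsTemplate is the configuration property template for specifying secrets to be mounted into the
+	// executors.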
+	SparkKubernetesExecutorSecretsTemplate = "spark.kubernetes.executor.secrets.%s"
+
+	// SparkKubernetesDriverSecretKeyRefTemplate is the configuration property template for specifying environment variables
+	// from SecretKeyRefs for the driver.
+	SparkKubernetesDriverSecretKeyRefTemplate = "spark.kubernetes.driver.secretKeyRef.%s"
+
+	// SparkKubernetesExecutorSecretKeyRefTemplate is the configuration property template for specifying environment variables
+	// from SecretKeyRefs for the executors.
+	SparkKubernetesExecutorSecretKeyRefTemplate = "spark.kubernetes.executor.secretKeyRef.%s"
+
+	// SparkKubernetesDriverContainerImage is the configuration property for specifying a custom driver container image.
+	SparkKubernetesDriverContainerImage = "spark.kubernetes.driver.container.image"
+
+	// SparkKubernetesExecutorContainerImage is the configuration property for specifying a custom executor container image.
+	SparkKubernetesExecutorContainerImage = "spark.kubernetes.executor.container.image"
+
+	// SparkKubernetesDriverVolumesPrefix is the Spark configuration key prefix for mounting volumes into the driver pod.
+	SparkKubernetesDriverVolumesPrefix                = "spark.kubernetes.driver.volumes."
+	SparkKubernetesDriverVolumesMountPathTemplate     = "spark.kubernetes.driver.volumes.%s.%s.mount.path"
+	SparkKubernetesDriverVolumesMountSubPathTemplate  = "spark.kubernetes.driver.volumes.%s.%s.mount.subPath"
+	SparkKubernetesDriverVolumesMountReadOnlyTemplate = "spark.kubernetes.driver.volumes.%s.%s.mount.readOnly"
+	SparkKubernetesDriverVolumesOptionsTemplate       = "spark.kubernetes.driver.volumes.%s.%s.options.%s"
+
+	// SparkKubernetesExecutorVolumesPrefix is the Spark configuration key prefix for mounting volumes into the executor pods.
+	SparkKubernetesExecutorVolumesPrefix                = "spark.kubernetes.executor.volumes."
+	SparkKubernetesExecutorVolumesMountPathTemplate     = "spark.kubernetes.executor.volumes.%s.%s.mount.path"
+	SparkKubernetesExecutorVolumesMountSubPathTemplate  = "spark.kubernetes.executor.volumes.%s.%s.mount.subPath"
+	SparkKubernetesExecutorVolumesMountReadOnlyTemplate = "spark.kubernetes.executor.volumes.%s.%s.mount.readOnly"
+	SparkKubernetesExecutorVolumesOptionsTemplate       = "spark.kubernetes.executor.volumes.%s.%s.options.%s"
+
+	// SparkKubernetesMemoryOverheadFactor is the Spark configuration key for specifying the memory overhead factor used for non-JVM memory.
+	SparkKubernetesMemoryOverheadFactor = "spark.kubernetes.memoryOverheadFactor"
+
+	// SparkKubernetesPysparkPythonVersion is the Spark configuration key for specifying the Python version used.
+	SparkKubernetesPysparkPythonVersion = "spark.kubernetes.pyspark.pythonVersion"
+
+	SparkKubernetesDriverPodTemplateFile = "spark.kubernetes.driver.podTemplateFile"
+
+	SparkKubernetesDriverPodTemplateContainerName = "spark.kubernetes.driver.podTemplateContainerName"
+
+	SparkKubernetesExecutorPodTemplateFile = "spark.kubernetes.executor.podTemplateFile"
+
+	SparkKubernetesExecutorPodTemplateContainerName = "spark.kubernetes.executor.podTemplateContainerName"
+
+	SparkKubernetesDriverSchedulerName = "spark.kubernetes.driver.schedulerName"
+
+	SparkKubernetesExecutorSchedulerName = "spark.kubernetes.executor.schedulerName"
+
+	// SparkExecutorEnvVarConfigKeyPrefix is the Spark configuration prefix for setting environment variables
+	// into the executors.
+	SparkExecutorEnvVarConfigKeyPrefix = "spark.executorEnv."
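+
+	// Editor's illustration (a sketch, not part of the original change): assuming the
+	// two placeholders in the volume templates are the volume type and the volume
+	// name, a hostPath volume named "spark-local" expands as:
+	//
+	//	fmt.Sprintf(SparkKubernetesDriverVolumesMountPathTemplate, "hostPath", "spark-local")
+	//	// -> spark.kubernetes.driver.volumes.hostPath.spark-local.mount.path
+
+	// SparkKubernetesInitContainerImage is the Spark configuration key for specifying a custom init-container image.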
+	SparkKubernetesInitContainerImage = "spark.kubernetes.initContainer.image"
+
+	// SparkKubernetesMountDependenciesJarsDownloadDir is the Spark configuration key for specifying the download path in the driver and
+	// executors for remote jars.
+	SparkKubernetesMountDependenciesJarsDownloadDir = "spark.kubernetes.mountDependencies.jarsDownloadDir"
+
+	// SparkKubernetesMountDependenciesFilesDownloadDir is the Spark configuration key for specifying the download path in the driver and
+	// executors for remote files.
+	SparkKubernetesMountDependenciesFilesDownloadDir = "spark.kubernetes.mountDependencies.filesDownloadDir"
+
+	// SparkKubernetesMountDependenciesTimeout is the Spark configuration key for specifying the timeout in seconds of downloading
+	// remote dependencies.
+	SparkKubernetesMountDependenciesTimeout = "spark.kubernetes.mountDependencies.timeout"
+
+	// SparkKubernetesMountDependenciesMaxSimultaneousDownloads is the Spark configuration key for specifying the maximum number of remote
+	// dependencies to download simultaneously.
+	SparkKubernetesMountDependenciesMaxSimultaneousDownloads = "spark.kubernetes.mountDependencies.maxSimultaneousDownloads"
+
+	// SparkKubernetesSubmissionWaitAppCompletion is the Spark configuration key for specifying whether to wait for the application to complete.
+	SparkKubernetesSubmissionWaitAppCompletion = "spark.kubernetes.submission.waitAppCompletion"
+
+	// SparkDriverExtraJavaOptions is the Spark configuration key for a string of extra JVM options to pass to the driver.
+	SparkDriverExtraJavaOptions = "spark.driver.extraJavaOptions"
+
+	// SparkExecutorExtraJavaOptions is the Spark configuration key for a string of extra JVM options to pass to executors.
+	SparkExecutorExtraJavaOptions = "spark.executor.extraJavaOptions"
+
+	// SparkKubernetesExecutorDeleteOnTermination is the Spark configuration key for specifying whether executor pods should be deleted in case of failure or normal termination.
+	SparkKubernetesExecutorDeleteOnTermination = "spark.kubernetes.executor.deleteOnTermination"
+)
+
+// Dynamic allocation properties.
+// Ref: https://spark.apache.org/docs/latest/configuration.html#dynamic-allocation
+const (
+	// SparkDynamicAllocationEnabled is the Spark configuration key for specifying whether dynamic
+	// allocation is enabled.
+	SparkDynamicAllocationEnabled = "spark.dynamicAllocation.enabled"
+
+	SparkDynamicAllocationExecutorIdleTimeout = "spark.dynamicAllocation.executorIdleTimeout"
+
+	SparkDynamicAllocationCachedExecutorIdleTimeout = "spark.dynamicAllocation.cachedExecutorIdleTimeout"
+
+	// SparkDynamicAllocationInitialExecutors is the Spark configuration key for specifying
+	// the initial number of executors to request if dynamic allocation is enabled.
+	SparkDynamicAllocationInitialExecutors = "spark.dynamicAllocation.initialExecutors"
+
+	// SparkDynamicAllocationMaxExecutors is the Spark configuration key for specifying the
+	// upper bound of the number of executors to request if dynamic allocation is enabled.
+	SparkDynamicAllocationMaxExecutors = "spark.dynamicAllocation.maxExecutors"
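+
+	// Editor's illustration (a sketch, not part of the original change): a typical
+	// dynamic-allocation setup sets the enabled flag plus executor bounds, e.g.
+	//
+	//	props[SparkDynamicAllocationEnabled] = "true"
+	//	props[SparkDynamicAllocationInitialExecutors] = "2"
+	//	props[SparkDynamicAllocationMaxExecutors] = "10"
+	//
+	// where props is a hypothetical map of Spark properties handed to spark-submit.
+
+	// SparkDynamicAllocationMinExecutors is the Spark configuration key for specifying the
+	// lower bound of the number of executors to request if dynamic allocation is enabled.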
+	SparkDynamicAllocationMinExecutors = "spark.dynamicAllocation.minExecutors"
+
+	SparkDynamicAllocationExecutorAllocationRatio = "spark.dynamicAllocation.executorAllocationRatio"
+
+	SparkDynamicAllocationSchedulerBacklogTimeout = "spark.dynamicAllocation.schedulerBacklogTimeout"
+
+	SparkDynamicAllocationSustainedSchedulerBacklogTimeout = "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout"
+
+	// SparkDynamicAllocationShuffleTrackingEnabled is the Spark configuration key for
+	// specifying whether shuffle data tracking is enabled.
+	SparkDynamicAllocationShuffleTrackingEnabled = "spark.dynamicAllocation.shuffleTracking.enabled"
+
+	// SparkDynamicAllocationShuffleTrackingTimeout is the Spark configuration key for specifying
+	// the shuffle tracking timeout in milliseconds if shuffle tracking is enabled.
+	SparkDynamicAllocationShuffleTrackingTimeout = "spark.dynamicAllocation.shuffleTracking.timeout"
+)
+
+const (
+	// SparkRoleDriver is the value of the spark-role label for the driver.
+	SparkRoleDriver = "driver"
+
+	// SparkRoleExecutor is the value of the spark-role label for the executors.
+	SparkRoleExecutor = "executor"
+)
+
+const (
+	// DefaultSparkConfDir is the default directory for Spark configuration files if not specified.
+	// This directory is where the Spark ConfigMap is mounted in the driver and executor containers.
+	DefaultSparkConfDir = "/etc/spark/conf"
+
+	// SparkConfigMapVolumeName is the name of the ConfigMap volume of Spark configuration files.
+	SparkConfigMapVolumeName = "spark-configmap-volume"
+
+	// DefaultHadoopConfDir is the default directory for Hadoop configuration files if not specified.
+	// This directory is where the Hadoop ConfigMap is mounted in the driver and executor containers.
+	DefaultHadoopConfDir = "/etc/hadoop/conf"
+
+	// HadoopConfigMapVolumeName is the name of the ConfigMap volume of Hadoop configuration files.
+	HadoopConfigMapVolumeName = "hadoop-configmap-volume"
+
+	// EnvSparkConfDir is the environment variable added to the driver and executor Pods that points
+	// to the directory where the Spark ConfigMap is mounted.
+	EnvSparkConfDir = "SPARK_CONF_DIR"
+
+	// EnvHadoopConfDir is the environment variable added to the driver and executor Pods that points
+	// to the directory where the Hadoop ConfigMap is mounted.
+	EnvHadoopConfDir = "HADOOP_CONF_DIR"
+)
+
+const (
+	// LabelSparkApplicationSelector is the app ID label set by the Spark distribution on the driver/executor Pods.
+	LabelSparkApplicationSelector = "spark-app-selector"
+
+	// LabelSparkRole is the driver/executor label set by the operator/Spark distribution on the driver/executor Pods.
+	LabelSparkRole = "spark-role"
+
+	// LabelAnnotationPrefix is the prefix of every label and annotation added by the controller.
+	LabelAnnotationPrefix = "sparkoperator.k8s.io/"
+
+	// LabelSparkAppName is the name of the label for the SparkApplication object name.
+	LabelSparkAppName = LabelAnnotationPrefix + "app-name"
+
+	// LabelScheduledSparkAppName is the name of the label for the ScheduledSparkApplication object name.
+	LabelScheduledSparkAppName = LabelAnnotationPrefix + "scheduled-app-name"
+
+	// LabelLaunchedBySparkOperator is a label on Spark pods launched through the Spark Operator.
+	LabelLaunchedBySparkOperator = LabelAnnotationPrefix + "launched-by-spark-operator"
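+
+	// Editor's illustration (a sketch, not part of the original change): these labels
+	// are what make owned resources discoverable, e.g. selecting all runs of a
+	// scheduled application:
+	//
+	//	selector := labels.Set{LabelScheduledSparkAppName: app.Name}.AsSelector()
+	//
+	// mirroring how the removed scheduledsparkapplication controller listed
+	// SparkApplications (labels here is k8s.io/apimachinery/pkg/labels).
+
+	// LabelSubmissionID is the label that records the submission ID of the current run of an application.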
+	LabelSubmissionID = LabelAnnotationPrefix + "submission-id"
+)
+
+const (
+	// SparkDriverContainerName is the name of the driver container in the Spark driver pod.
+	SparkDriverContainerName = "spark-kubernetes-driver"
+
+	// SparkExecutorContainerName is the name of the executor container in the Spark executor pod.
+	SparkExecutorContainerName = "executor"
+
+	// Spark3DefaultExecutorContainerName is the default executor container name in
+	// Spark 3.x, which allows the container name to be configured through the pod
+	// template support.
+	Spark3DefaultExecutorContainerName = "spark-kubernetes-executor"
+
+	// SparkLocalDirVolumePrefix is the volume name prefix for the "scratch" space directory.
+	SparkLocalDirVolumePrefix = "spark-local-dir-"
+)
+
+const (
+	SparkUIPortKey = "spark.ui.port"
+
+	DefaultSparkWebUIPort int32 = 4040
+
+	DefaultSparkWebUIPortName = "spark-driver-ui-port"
+)
+
+// https://spark.apache.org/docs/latest/configuration.html
+const (
+	DefaultCPUMilliCores = 1000
+
+	DefaultMemoryBytes = 1 << 30 // 1 Gi
+
+	DefaultJVMMemoryOverheadFactor = 0.1
+
+	DefaultNonJVMMemoryOverheadFactor = 0.4
+
+	MinMemoryOverhead = 384 * (1 << 20) // 384 Mi
+)
+
+const (
+	// EnvGoogleApplicationCredentials is the environment variable used by the
+	// Application Default Credentials mechanism. More details can be found at
+	// https://developers.google.com/identity/protocols/application-default-credentials.
+	EnvGoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS"
+
+	// ServiceAccountJSONKeyFileName is the assumed name of the service account
+	// JSON key file. This name is added to the service account secret mount path to
+	// form the path to the JSON key file referred to by GOOGLE_APPLICATION_CREDENTIALS.
+	ServiceAccountJSONKeyFileName = "key.json"
+
+	// EnvHadoopTokenFileLocation is the environment variable for specifying the location
+	// of the file storing the Hadoop delegation token.
+	EnvHadoopTokenFileLocation = "HADOOP_TOKEN_FILE_LOCATION"
+
+	// HadoopDelegationTokenFileName is the assumed name of the file storing the Hadoop
+	// delegation token. This name is added to the delegation token secret mount path to
+	// form the path to the file referred to by HADOOP_TOKEN_FILE_LOCATION.
+	HadoopDelegationTokenFileName = "hadoop.token"
+)
diff --git a/pkg/common/volcano.go b/pkg/common/volcano.go
new file mode 100644
index 000000000..cda710480
--- /dev/null
+++ b/pkg/common/volcano.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+const (
+	VolcanoSchedulerName = "volcano"
+
+	VolcanoPodGroupName = "podgroups.scheduling.volcano.sh"
+)
diff --git a/pkg/config/config.go b/pkg/config/config.go
deleted file mode 100644
index 18a708c6d..000000000
--- a/pkg/config/config.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -// GetDriverAnnotationOption returns a spark-submit option for a driver annotation of the given key and value. -func GetDriverAnnotationOption(key string, value string) string { - return fmt.Sprintf("%s%s=%s", SparkDriverAnnotationKeyPrefix, key, value) -} - -// GetExecutorAnnotationOption returns a spark-submit option for an executor annotation of the given key and value. -func GetExecutorAnnotationOption(key string, value string) string { - return fmt.Sprintf("%s%s=%s", SparkExecutorAnnotationKeyPrefix, key, value) -} - -// GetDriverEnvVarConfOptions returns a list of spark-submit options for setting driver environment variables. -func GetDriverEnvVarConfOptions(app *v1beta2.SparkApplication) []string { - var envVarConfOptions []string - for key, value := range app.Spec.Driver.EnvVars { - envVar := fmt.Sprintf("%s%s=%s", SparkDriverEnvVarConfigKeyPrefix, key, value) - envVarConfOptions = append(envVarConfOptions, envVar) - } - return envVarConfOptions -} - -// GetExecutorEnvVarConfOptions returns a list of spark-submit options for setting executor environment variables. -func GetExecutorEnvVarConfOptions(app *v1beta2.SparkApplication) []string { - var envVarConfOptions []string - for key, value := range app.Spec.Executor.EnvVars { - envVar := fmt.Sprintf("%s%s=%s", SparkExecutorEnvVarConfigKeyPrefix, key, value) - envVarConfOptions = append(envVarConfOptions, envVar) - } - return envVarConfOptions -} - -// GetPrometheusConfigMapName returns the name of the ConfigMap for Prometheus configuration. -func GetPrometheusConfigMapName(app *v1beta2.SparkApplication) string { - return fmt.Sprintf("%s-%s", app.Name, PrometheusConfigMapNameSuffix) -} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go deleted file mode 100644 index 485c1cb28..000000000 --- a/pkg/config/config_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -func TestGetDriverEnvVarConfOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - EnvVars: map[string]string{ - "ENV1": "VALUE1", - "ENV2": "VALUE2", - }, - }, - }, - }, - } - - options := GetDriverEnvVarConfOptions(app) - optionsMap := map[string]bool{ - strings.TrimPrefix(options[0], SparkDriverEnvVarConfigKeyPrefix): true, - strings.TrimPrefix(options[1], SparkDriverEnvVarConfigKeyPrefix): true, - } - assert.Equal(t, 2, len(optionsMap)) - assert.True(t, optionsMap["ENV1=VALUE1"]) - assert.True(t, optionsMap["ENV2=VALUE2"]) -} - -func TestGetExecutorEnvVarConfOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - Spec: v1beta2.SparkApplicationSpec{ - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - EnvVars: map[string]string{ - "ENV1": "VALUE1", - "ENV2": "VALUE2", - }, - }, - }, - }, - } - - options := GetExecutorEnvVarConfOptions(app) - optionsMap := map[string]bool{ - strings.TrimPrefix(options[0], SparkExecutorEnvVarConfigKeyPrefix): true, - strings.TrimPrefix(options[1], SparkExecutorEnvVarConfigKeyPrefix): true, - } - assert.Equal(t, 2, len(optionsMap)) - assert.True(t, optionsMap["ENV1=VALUE1"]) - assert.True(t, optionsMap["ENV2=VALUE2"]) -} diff --git a/pkg/config/constants.go b/pkg/config/constants.go deleted file mode 100644 index b65f3a38c..000000000 --- a/pkg/config/constants.go +++ /dev/null @@ -1,317 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -const ( - // DefaultSparkConfDir is the default directory for Spark configuration files if not specified. - // This directory is where the Spark ConfigMap is mounted in the driver and executor containers. - DefaultSparkConfDir = "/etc/spark/conf" - // SparkConfigMapVolumeName is the name of the ConfigMap volume of Spark configuration files. - SparkConfigMapVolumeName = "spark-configmap-volume" - // DefaultHadoopConfDir is the default directory for Spark configuration files if not specified. - // This directory is where the Hadoop ConfigMap is mounted in the driver and executor containers. - DefaultHadoopConfDir = "/etc/hadoop/conf" - // HadoopConfigMapVolumeName is the name of the ConfigMap volume of Hadoop configuration files. - HadoopConfigMapVolumeName = "hadoop-configmap-volume" - // SparkConfDirEnvVar is the environment variable to add to the driver and executor Pods that point - // to the directory where the Spark ConfigMap is mounted. - SparkConfDirEnvVar = "SPARK_CONF_DIR" - // HadoopConfDirEnvVar is the environment variable to add to the driver and executor Pods that point - // to the directory where the Hadoop ConfigMap is mounted. 
- HadoopConfDirEnvVar = "HADOOP_CONF_DIR" -) - -const ( - // LabelAnnotationPrefix is the prefix of every labels and annotations added by the controller. - LabelAnnotationPrefix = "sparkoperator.k8s.io/" - // SparkAppNameLabel is the name of the label for the SparkApplication object name. - SparkAppNameLabel = LabelAnnotationPrefix + "app-name" - // ScheduledSparkAppNameLabel is the name of the label for the ScheduledSparkApplication object name. - ScheduledSparkAppNameLabel = LabelAnnotationPrefix + "scheduled-app-name" - // LaunchedBySparkOperatorLabel is a label on Spark pods launched through the Spark Operator. - LaunchedBySparkOperatorLabel = LabelAnnotationPrefix + "launched-by-spark-operator" - // SparkApplicationSelectorLabel is the AppID set by the spark-distribution on the driver/executors Pods. - SparkApplicationSelectorLabel = "spark-app-selector" - // SparkRoleLabel is the driver/executor label set by the operator/spark-distribution on the driver/executors Pods. - SparkRoleLabel = "spark-role" - // SparkDriverRole is the value of the spark-role label for the driver. - SparkDriverRole = "driver" - // SparkExecutorRole is the value of the spark-role label for the executors. - SparkExecutorRole = "executor" - // SubmissionIDLabel is the label that records the submission ID of the current run of an application. - SubmissionIDLabel = LabelAnnotationPrefix + "submission-id" -) - -const ( - // SparkAppNameKey is the configuration property for application name. - SparkAppNameKey = "spark.app.name" - // SparkAppNamespaceKey is the configuration property for application namespace. - SparkAppNamespaceKey = "spark.kubernetes.namespace" - // SparkContainerImageKey is the configuration property for specifying the unified container image. - SparkContainerImageKey = "spark.kubernetes.container.image" - // SparkImagePullSecretKey is the configuration property for specifying the comma-separated list of image-pull - // secrets. - SparkImagePullSecretKey = "spark.kubernetes.container.image.pullSecrets" - // SparkContainerImagePullPolicyKey is the configuration property for specifying the container image pull policy. - SparkContainerImagePullPolicyKey = "spark.kubernetes.container.image.pullPolicy" - // SparkNodeSelectorKeyPrefix is the configuration property prefix for specifying node selector for the pods. - SparkNodeSelectorKeyPrefix = "spark.kubernetes.node.selector." - // SparkDriverContainerImageKey is the configuration property for specifying a custom driver container image. - SparkDriverContainerImageKey = "spark.kubernetes.driver.container.image" - // SparkExecutorContainerImageKey is the configuration property for specifying a custom executor container image. - SparkExecutorContainerImageKey = "spark.kubernetes.executor.container.image" - // SparkDriverCoreRequestKey is the configuration property for specifying the physical CPU request for the driver. - SparkDriverCoreRequestKey = "spark.kubernetes.driver.request.cores" - // SparkExecutorCoreRequestKey is the configuration property for specifying the physical CPU request for executors. - SparkExecutorCoreRequestKey = "spark.kubernetes.executor.request.cores" - // SparkDriverCoreLimitKey is the configuration property for specifying the hard CPU limit for the driver pod. - SparkDriverCoreLimitKey = "spark.kubernetes.driver.limit.cores" - // SparkExecutorCoreLimitKey is the configuration property for specifying the hard CPU limit for the executor pods. 
- SparkExecutorCoreLimitKey = "spark.kubernetes.executor.limit.cores" - // SparkDriverSecretKeyPrefix is the configuration property prefix for specifying secrets to be mounted into the - // driver. - SparkDriverSecretKeyPrefix = "spark.kubernetes.driver.secrets." - // SparkExecutorSecretKeyPrefix is the configuration property prefix for specifying secrets to be mounted into the - // executors. - SparkExecutorSecretKeyPrefix = "spark.kubernetes.executor.secrets." - // SparkDriverSecretKeyRefKeyPrefix is the configuration property prefix for specifying environment variables - // from SecretKeyRefs for the driver. - SparkDriverSecretKeyRefKeyPrefix = "spark.kubernetes.driver.secretKeyRef." - // SparkExecutorSecretKeyRefKeyPrefix is the configuration property prefix for specifying environment variables - // from SecretKeyRefs for the executors. - SparkExecutorSecretKeyRefKeyPrefix = "spark.kubernetes.executor.secretKeyRef." - // SparkDriverEnvVarConfigKeyPrefix is the Spark configuration prefix for setting environment variables - // into the driver. - SparkDriverEnvVarConfigKeyPrefix = "spark.kubernetes.driverEnv." - // SparkExecutorEnvVarConfigKeyPrefix is the Spark configuration prefix for setting environment variables - // into the executor. - SparkExecutorEnvVarConfigKeyPrefix = "spark.executorEnv." - // SparkDriverAnnotationKeyPrefix is the Spark configuration key prefix for annotations on the driver Pod. - SparkDriverAnnotationKeyPrefix = "spark.kubernetes.driver.annotation." - // SparkExecutorAnnotationKeyPrefix is the Spark configuration key prefix for annotations on the executor Pods. - SparkExecutorAnnotationKeyPrefix = "spark.kubernetes.executor.annotation." - // SparkDriverLabelKeyPrefix is the Spark configuration key prefix for labels on the driver Pod. - SparkDriverLabelKeyPrefix = "spark.kubernetes.driver.label." - // SparkExecutorLabelKeyPrefix is the Spark configuration key prefix for labels on the executor Pods. - SparkExecutorLabelKeyPrefix = "spark.kubernetes.executor.label." - // SparkDriverVolumesPrefix is the Spark volumes configuration for mounting a volume into the driver pod. - SparkDriverVolumesPrefix = "spark.kubernetes.driver.volumes." - // SparkExecutorVolumesPrefix is the Spark volumes configuration for mounting a volume into the driver pod. - SparkExecutorVolumesPrefix = "spark.kubernetes.executor.volumes." - // SparkDriverPodNameKey is the Spark configuration key for driver pod name. - SparkDriverPodNameKey = "spark.kubernetes.driver.pod.name" - // SparkDriverServiceAccountName is the Spark configuration key for specifying name of the Kubernetes service - // account used by the driver pod. - SparkDriverServiceAccountName = "spark.kubernetes.authenticate.driver.serviceAccountName" - // account used by the executor pod. - SparkExecutorAccountName = "spark.kubernetes.authenticate.executor.serviceAccountName" - // SparkInitContainerImage is the Spark configuration key for specifying a custom init-container image. - SparkInitContainerImage = "spark.kubernetes.initContainer.image" - // SparkJarsDownloadDir is the Spark configuration key for specifying the download path in the driver and - // executors for remote jars. - SparkJarsDownloadDir = "spark.kubernetes.mountDependencies.jarsDownloadDir" - // SparkFilesDownloadDir is the Spark configuration key for specifying the download path in the driver and - // executors for remote files. 
- SparkFilesDownloadDir = "spark.kubernetes.mountDependencies.filesDownloadDir" - // SparkDownloadTimeout is the Spark configuration key for specifying the timeout in seconds of downloading - // remote dependencies. - SparkDownloadTimeout = "spark.kubernetes.mountDependencies.timeout" - // SparkMaxSimultaneousDownloads is the Spark configuration key for specifying the maximum number of remote - // dependencies to download. - SparkMaxSimultaneousDownloads = "spark.kubernetes.mountDependencies.maxSimultaneousDownloads" - // SparkWaitAppCompletion is the Spark configuration key for specifying whether to wait for application to complete. - SparkWaitAppCompletion = "spark.kubernetes.submission.waitAppCompletion" - // SparkPythonVersion is the Spark configuration key for specifying python version used. - SparkPythonVersion = "spark.kubernetes.pyspark.pythonVersion" - // SparkMemoryOverheadFactor is the Spark configuration key for specifying memory overhead factor used for Non-JVM memory. - SparkMemoryOverheadFactor = "spark.kubernetes.memoryOverheadFactor" - // SparkDriverJavaOptions is the Spark configuration key for a string of extra JVM options to pass to driver. - SparkDriverJavaOptions = "spark.driver.extraJavaOptions" - // SparkExecutorJavaOptions is the Spark configuration key for a string of extra JVM options to pass to executors. - SparkExecutorJavaOptions = "spark.executor.extraJavaOptions" - // SparkExecutorDeleteOnTermination is the Spark configuration for specifying whether executor pods should be deleted in case of failure or normal termination - SparkExecutorDeleteOnTermination = "spark.kubernetes.executor.deleteOnTermination" - // SparkDriverKubernetesMaster is the Spark configuration key for specifying the Kubernetes master the driver use - // to manage executor pods and other Kubernetes resources. - SparkDriverKubernetesMaster = "spark.kubernetes.driver.master" - // SparkDriverServiceAnnotationKeyPrefix is the key prefix of annotations to be added to the driver service. - SparkDriverServiceAnnotationKeyPrefix = "spark.kubernetes.driver.service.annotation." - // SparkDriverServiceLabelKeyPrefix is the key prefix of annotations to be added to the driver service. - SparkDriverServiceLabelKeyPrefix = "spark.kubernetes.driver.service.label." - // SparkDynamicAllocationEnabled is the Spark configuration key for specifying if dynamic - // allocation is enabled or not. - SparkDynamicAllocationEnabled = "spark.dynamicAllocation.enabled" - // SparkDynamicAllocationShuffleTrackingEnabled is the Spark configuration key for - // specifying if shuffle data tracking is enabled. - SparkDynamicAllocationShuffleTrackingEnabled = "spark.dynamicAllocation.shuffleTracking.enabled" - // SparkDynamicAllocationShuffleTrackingTimeout is the Spark configuration key for specifying - // the shuffle tracking timeout in milliseconds if shuffle tracking is enabled. - SparkDynamicAllocationShuffleTrackingTimeout = "spark.dynamicAllocation.shuffleTracking.timeout" - // SparkDynamicAllocationInitialExecutors is the Spark configuration key for specifying - // the initial number of executors to request if dynamic allocation is enabled. - SparkDynamicAllocationInitialExecutors = "spark.dynamicAllocation.initialExecutors" - // SparkDynamicAllocationMinExecutors is the Spark configuration key for specifying the - // lower bound of the number of executors to request if dynamic allocation is enabled. 
- SparkDynamicAllocationMinExecutors = "spark.dynamicAllocation.minExecutors" - // SparkDynamicAllocationMaxExecutors is the Spark configuration key for specifying the - // upper bound of the number of executors to request if dynamic allocation is enabled. - SparkDynamicAllocationMaxExecutors = "spark.dynamicAllocation.maxExecutors" -) - -const ( - // GoogleApplicationCredentialsEnvVar is the environment variable used by the - // Application Default Credentials mechanism. More details can be found at - // https://developers.google.com/identity/protocols/application-default-credentials. - GoogleApplicationCredentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" - // ServiceAccountJSONKeyFileName is the assumed name of the service account - // Json key file. This name is added to the service account secret mount path to - // form the path to the Json key file referred to by GOOGLE_APPLICATION_CREDENTIALS. - ServiceAccountJSONKeyFileName = "key.json" - // HadoopTokenFileLocationEnvVar is the environment variable for specifying the location - // where the file storing the Hadoop delegation token is located. - HadoopTokenFileLocationEnvVar = "HADOOP_TOKEN_FILE_LOCATION" - // HadoopDelegationTokenFileName is the assumed name of the file storing the Hadoop - // delegation token. This name is added to the delegation token secret mount path to - // form the path to the file referred to by HADOOP_TOKEN_FILE_LOCATION. - HadoopDelegationTokenFileName = "hadoop.token" -) - -const ( - // PrometheusConfigMapNameSuffix is the name prefix of the Prometheus ConfigMap. - PrometheusConfigMapNameSuffix = "prom-conf" - // PrometheusConfigMapMountPath is the mount path of the Prometheus ConfigMap. - PrometheusConfigMapMountPath = "/etc/metrics/conf" -) - -// DefaultMetricsProperties is the default content of metrics.properties. -const DefaultMetricsProperties = ` -*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink -driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource -executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource` - -// DefaultPrometheusConfiguration is the default content of prometheus.yaml. 
-const DefaultPrometheusConfiguration = ` -lowercaseOutputName: true -attrNameSnakeCase: true -rules: - - pattern: metrics<>Value - name: spark_driver_$3_$4 - type: GAUGE - labels: - app_namespace: "$1" - app_id: "$2" - - pattern: metrics<>Value - name: spark_streaming_driver_$4 - type: GAUGE - labels: - app_namespace: "$1" - app_id: "$2" - - pattern: metrics<>Value - name: spark_structured_streaming_driver_$4 - type: GAUGE - labels: - app_namespace: "$1" - app_id: "$2" - query_name: "$3" - - pattern: metrics<>Value - name: spark_executor_$4 - type: GAUGE - labels: - app_namespace: "$1" - app_id: "$2" - executor_id: "$3" - - pattern: metrics<>Count - name: spark_driver_DAGScheduler_$3_count - type: COUNTER - labels: - app_namespace: "$1" - app_id: "$2" - - pattern: metrics<>Count - name: spark_driver_HiveExternalCatalog_$3_count - type: COUNTER - labels: - app_namespace: "$1" - app_id: "$2" - - pattern: metrics<>Count - name: spark_driver_CodeGenerator_$3_count - type: COUNTER - labels: - app_namespace: "$1" - app_id: "$2" - - pattern: metrics<>Count - name: spark_driver_LiveListenerBus_$3_count - type: COUNTER - labels: - app_namespace: "$1" - app_id: "$2" - - pattern: metrics<>Value - name: spark_driver_LiveListenerBus_$3 - type: GAUGE - labels: - app_namespace: "$1" - app_id: "$2" - - pattern: metrics<>Count - name: spark_executor_$4_count - type: COUNTER - labels: - app_namespace: "$1" - app_id: "$2" - executor_id: "$3" - - pattern: metrics<>Value - name: spark_executor_$4_$5 - type: GAUGE - labels: - app_namespace: "$1" - app_id: "$2" - executor_id: "$3" - - pattern: metrics<>Count - name: spark_executor_HiveExternalCatalog_$4_count - type: COUNTER - labels: - app_namespace: "$1" - app_id: "$2" - executor_id: "$3" - - pattern: metrics<>Count - name: spark_executor_CodeGenerator_$4_count - type: COUNTER - labels: - app_namespace: "$1" - app_id: "$2" - executor_id: "$3" -` - -// DefaultPrometheusJavaAgentPort is the default port used by the Prometheus JMX exporter. -const DefaultPrometheusJavaAgentPort int32 = 8090 - -// DefaultPrometheusPortProtocol is the default protocol used by the Prometheus JMX exporter. -const DefaultPrometheusPortProtocol string = "TCP" - -// DefaultPrometheusPortName is the default port name used by the Prometheus JMX exporter. -const DefaultPrometheusPortName string = "jmx-exporter" - -const ( - // SparkDriverContainerName is name of driver container in spark driver pod - SparkDriverContainerName = "spark-kubernetes-driver" - // SparkExecutorContainerName is name of executor container in spark executor pod - SparkExecutorContainerName = "executor" - // Spark3DefaultExecutorContainerName is the default executor container name in - // Spark 3.x, which allows the container name to be configured through the pod - // template support. - Spark3DefaultExecutorContainerName = "spark-kubernetes-executor" - // SparkLocalDirVolumePrefix is the volume name prefix for "scratch" space directory - SparkLocalDirVolumePrefix = "spark-local-dir-" -) diff --git a/pkg/config/secret.go b/pkg/config/secret.go deleted file mode 100644 index 1a2c7fa49..000000000 --- a/pkg/config/secret.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - "path/filepath" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -// GetDriverSecretConfOptions returns a list of spark-submit options for mounting driver secrets. -func GetDriverSecretConfOptions(app *v1beta2.SparkApplication) []string { - var secretConfOptions []string - for _, s := range app.Spec.Driver.Secrets { - conf := fmt.Sprintf("%s%s=%s", SparkDriverSecretKeyPrefix, s.Name, s.Path) - secretConfOptions = append(secretConfOptions, conf) - if s.Type == v1beta2.GCPServiceAccountSecret { - conf = fmt.Sprintf( - "%s%s=%s", - SparkDriverEnvVarConfigKeyPrefix, - GoogleApplicationCredentialsEnvVar, - filepath.Join(s.Path, ServiceAccountJSONKeyFileName)) - secretConfOptions = append(secretConfOptions, conf) - } else if s.Type == v1beta2.HadoopDelegationTokenSecret { - conf = fmt.Sprintf( - "%s%s=%s", - SparkDriverEnvVarConfigKeyPrefix, - HadoopTokenFileLocationEnvVar, - filepath.Join(s.Path, HadoopDelegationTokenFileName)) - secretConfOptions = append(secretConfOptions, conf) - } - } - return secretConfOptions -} - -// GetExecutorSecretConfOptions returns a list of spark-submit options for mounting executor secrets. -func GetExecutorSecretConfOptions(app *v1beta2.SparkApplication) []string { - var secretConfOptions []string - for _, s := range app.Spec.Executor.Secrets { - conf := fmt.Sprintf("%s%s=%s", SparkExecutorSecretKeyPrefix, s.Name, s.Path) - secretConfOptions = append(secretConfOptions, conf) - if s.Type == v1beta2.GCPServiceAccountSecret { - conf = fmt.Sprintf( - "%s%s=%s", - SparkExecutorEnvVarConfigKeyPrefix, - GoogleApplicationCredentialsEnvVar, - filepath.Join(s.Path, ServiceAccountJSONKeyFileName)) - secretConfOptions = append(secretConfOptions, conf) - } else if s.Type == v1beta2.HadoopDelegationTokenSecret { - conf = fmt.Sprintf( - "%s%s=%s", - SparkExecutorEnvVarConfigKeyPrefix, - HadoopTokenFileLocationEnvVar, - filepath.Join(s.Path, HadoopDelegationTokenFileName)) - secretConfOptions = append(secretConfOptions, conf) - } - } - return secretConfOptions -} diff --git a/pkg/config/secret_test.go b/pkg/config/secret_test.go deleted file mode 100644 index fcd0ea992..000000000 --- a/pkg/config/secret_test.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -func TestGetDriverSecretConfOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Secrets: []v1beta2.SecretInfo{ - { - Name: "db-credentials", - Path: "/etc/secrets", - }, - { - Name: "gcp-service-account", - Path: "/etc/secrets", - Type: v1beta2.GCPServiceAccountSecret, - }, - { - Name: "hadoop-token", - Path: "/etc/secrets", - Type: v1beta2.HadoopDelegationTokenSecret, - }, - }, - }, - }, - }, - } - - options := GetDriverSecretConfOptions(app) - assert.Equal(t, 5, len(options)) - assert.Equal(t, fmt.Sprintf("%s=%s", "db-credentials", "/etc/secrets"), strings.TrimPrefix(options[0], - SparkDriverSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s", "gcp-service-account", "/etc/secrets"), - strings.TrimPrefix(options[1], SparkDriverSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s%s", GoogleApplicationCredentialsEnvVar, "/etc/secrets/", - ServiceAccountJSONKeyFileName), strings.TrimPrefix(options[2], SparkDriverEnvVarConfigKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s", "hadoop-token", "/etc/secrets"), strings.TrimPrefix(options[3], - SparkDriverSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s%s", HadoopTokenFileLocationEnvVar, "/etc/secrets/", - HadoopDelegationTokenFileName), strings.TrimPrefix(options[4], SparkDriverEnvVarConfigKeyPrefix)) -} - -func TestGetExecutorSecretConfOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - Spec: v1beta2.SparkApplicationSpec{ - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Secrets: []v1beta2.SecretInfo{ - { - Name: "db-credentials", - Path: "/etc/secrets", - }, - { - Name: "gcp-service-account", - Path: "/etc/secrets", - Type: v1beta2.GCPServiceAccountSecret, - }, - { - Name: "hadoop-token", - Path: "/etc/secrets", - Type: v1beta2.HadoopDelegationTokenSecret, - }, - }, - }, - }, - }, - } - - options := GetExecutorSecretConfOptions(app) - assert.Equal(t, 5, len(options)) - assert.Equal(t, fmt.Sprintf("%s=%s", "db-credentials", "/etc/secrets"), strings.TrimPrefix(options[0], - SparkExecutorSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s", "gcp-service-account", "/etc/secrets"), - strings.TrimPrefix(options[1], SparkExecutorSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s%s", GoogleApplicationCredentialsEnvVar, "/etc/secrets/", - ServiceAccountJSONKeyFileName), strings.TrimPrefix(options[2], SparkExecutorEnvVarConfigKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s", "hadoop-token", "/etc/secrets"), strings.TrimPrefix(options[3], - SparkExecutorSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s%s", HadoopTokenFileLocationEnvVar, "/etc/secrets/", - HadoopDelegationTokenFileName), strings.TrimPrefix(options[4], SparkExecutorEnvVarConfigKeyPrefix)) -} diff --git a/pkg/controller/scheduledsparkapplication/controller.go b/pkg/controller/scheduledsparkapplication/controller.go deleted file mode 100644 index 056ba9d56..000000000 --- a/pkg/controller/scheduledsparkapplication/controller.go +++ /dev/null @@ -1,425 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduledsparkapplication - -import ( - "context" - "fmt" - "reflect" - "sort" - "time" - - "github.com/golang/glog" - "github.com/robfig/cron/v3" - - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/retry" - "k8s.io/client-go/util/workqueue" - "k8s.io/utils/clock" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" - crdscheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -var ( - keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc -) - -type Controller struct { - crdClient crdclientset.Interface - kubeClient kubernetes.Interface - extensionsClient apiextensionsclient.Interface - queue workqueue.RateLimitingInterface - cacheSynced cache.InformerSynced - ssaLister crdlisters.ScheduledSparkApplicationLister - saLister crdlisters.SparkApplicationLister - clock clock.Clock -} - -func NewController( - crdClient crdclientset.Interface, - kubeClient kubernetes.Interface, - extensionsClient apiextensionsclient.Interface, - informerFactory crdinformers.SharedInformerFactory, - clock clock.Clock) *Controller { - crdscheme.AddToScheme(scheme.Scheme) - - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), - "scheduled-spark-application-controller") - - controller := &Controller{ - crdClient: crdClient, - kubeClient: kubeClient, - extensionsClient: extensionsClient, - queue: queue, - clock: clock, - } - - informer := informerFactory.Sparkoperator().V1beta2().ScheduledSparkApplications() - informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.onAdd, - UpdateFunc: controller.onUpdate, - DeleteFunc: controller.onDelete, - }) - controller.cacheSynced = informer.Informer().HasSynced - controller.ssaLister = informer.Lister() - controller.saLister = informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister() - - return controller -} - -func (c *Controller) Start(workers int, stopCh <-chan struct{}) error { - glog.Info("Starting the ScheduledSparkApplication controller") - - if !cache.WaitForCacheSync(stopCh, c.cacheSynced) { - return fmt.Errorf("timed out waiting for cache to sync") - } - - glog.Info("Starting the workers of the ScheduledSparkApplication controller") - for i := 0; i < workers; i++ { - // runWorker will loop until "something bad" happens. Until will then rekick - // the worker after one second. 
- go wait.Until(c.runWorker, time.Second, stopCh) - } - - return nil -} - -func (c *Controller) Stop() { - glog.Info("Stopping the ScheduledSparkApplication controller") - c.queue.ShutDown() -} - -func (c *Controller) runWorker() { - defer utilruntime.HandleCrash() - for c.processNextItem() { - } -} - -func (c *Controller) processNextItem() bool { - key, quit := c.queue.Get() - if quit { - return false - } - defer c.queue.Done(key) - - err := c.syncScheduledSparkApplication(key.(string)) - if err == nil { - // Successfully processed the key or the key was not found so tell the queue to stop tracking - // history for your key. This will reset things like failure counts for per-item rate limiting. - c.queue.Forget(key) - return true - } - - // There was a failure so be sure to report it. This method allows for pluggable error handling - // which can be used for things like cluster-monitoring - utilruntime.HandleError(fmt.Errorf("failed to sync ScheduledSparkApplication %q: %v", key, err)) - // Since we failed, we should requeue the item to work on later. This method will add a backoff - // to avoid hot-looping on particular items (they're probably still not going to work right away) - // and overall controller protection (everything I've done is broken, this controller needs to - // calm down or it can starve other useful work) cases. - c.queue.AddRateLimited(key) - - return true -} - -func (c *Controller) syncScheduledSparkApplication(key string) error { - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return err - } - app, err := c.ssaLister.ScheduledSparkApplications(namespace).Get(name) - if err != nil { - return err - } - - if app.Spec.Suspend != nil && *app.Spec.Suspend { - return nil - } - - glog.V(2).Infof("Syncing ScheduledSparkApplication %s/%s", app.Namespace, app.Name) - status := app.Status.DeepCopy() - schedule, err := cron.ParseStandard(app.Spec.Schedule) - if err != nil { - glog.Errorf("failed to parse schedule %s of ScheduledSparkApplication %s/%s: %v", app.Spec.Schedule, app.Namespace, app.Name, err) - status.ScheduleState = v1beta2.FailedValidationState - status.Reason = err.Error() - } else { - status.ScheduleState = v1beta2.ScheduledState - now := c.clock.Now() - nextRunTime := status.NextRun.Time - // if we updated the schedule for an earlier execution - those changes need to be reflected - updatedNextRunTime := schedule.Next(now) - if nextRunTime.IsZero() || updatedNextRunTime.Before(nextRunTime) { - // The first run of the application. - nextRunTime = updatedNextRunTime - status.NextRun = metav1.NewTime(nextRunTime) - } - if nextRunTime.Before(now) { - // Check if the condition for starting the next run is satisfied. 
- ok, err := c.shouldStartNextRun(app) - if err != nil { - return err - } - if ok { - glog.Infof("Next run of ScheduledSparkApplication %s/%s is due, creating a new SparkApplication instance", app.Namespace, app.Name) - name, err := c.startNextRun(app, now) - if err != nil { - return err - } - status.LastRun = metav1.NewTime(now) - status.NextRun = metav1.NewTime(schedule.Next(status.LastRun.Time)) - status.LastRunName = name - } - } - - if err = c.checkAndUpdatePastRuns(app, status); err != nil { - return err - } - } - - return c.updateScheduledSparkApplicationStatus(app, status) -} - -func (c *Controller) onAdd(obj interface{}) { - c.enqueue(obj) -} - -func (c *Controller) onUpdate(oldObj, newObj interface{}) { - c.enqueue(newObj) -} - -func (c *Controller) onDelete(obj interface{}) { - c.dequeue(obj) -} - -func (c *Controller) enqueue(obj interface{}) { - key, err := keyFunc(obj) - if err != nil { - glog.Errorf("failed to get key for %v: %v", obj, err) - return - } - - c.queue.AddRateLimited(key) -} - -func (c *Controller) dequeue(obj interface{}) { - key, err := keyFunc(obj) - if err != nil { - glog.Errorf("failed to get key for %v: %v", obj, err) - return - } - - c.queue.Forget(key) - c.queue.Done(key) -} - -func (c *Controller) createSparkApplication( - scheduledApp *v1beta2.ScheduledSparkApplication, t time.Time) (string, error) { - app := &v1beta2.SparkApplication{} - app.Spec = scheduledApp.Spec.Template - app.Name = fmt.Sprintf("%s-%d", scheduledApp.Name, t.UnixNano()) - app.OwnerReferences = append(app.OwnerReferences, metav1.OwnerReference{ - APIVersion: v1beta2.SchemeGroupVersion.String(), - Kind: reflect.TypeOf(v1beta2.ScheduledSparkApplication{}).Name(), - Name: scheduledApp.Name, - UID: scheduledApp.UID, - }) - app.ObjectMeta.Namespace = scheduledApp.Namespace - app.ObjectMeta.Labels = make(map[string]string) - for key, value := range scheduledApp.Labels { - app.ObjectMeta.Labels[key] = value - } - app.ObjectMeta.Labels[config.ScheduledSparkAppNameLabel] = scheduledApp.Name - _, err := c.crdClient.SparkoperatorV1beta2().SparkApplications(scheduledApp.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - return "", err - } - return app.Name, nil -} - -func (c *Controller) shouldStartNextRun(app *v1beta2.ScheduledSparkApplication) (bool, error) { - sortedApps, err := c.listSparkApplications(app) - if err != nil { - return false, err - } - if len(sortedApps) == 0 { - return true, nil - } - - // The last run (most recently started) is the first one in the sorted slice. 
- lastRun := sortedApps[0] - switch app.Spec.ConcurrencyPolicy { - case v1beta2.ConcurrencyAllow: - return true, nil - case v1beta2.ConcurrencyForbid: - return c.hasLastRunFinished(lastRun), nil - case v1beta2.ConcurrencyReplace: - if err := c.killLastRunIfNotFinished(lastRun); err != nil { - return false, err - } - return true, nil - } - return true, nil -} - -func (c *Controller) startNextRun(app *v1beta2.ScheduledSparkApplication, now time.Time) (string, error) { - name, err := c.createSparkApplication(app, now) - if err != nil { - glog.Errorf("failed to create a SparkApplication instance for ScheduledSparkApplication %s/%s: %v", app.Namespace, app.Name, err) - return "", err - } - return name, nil -} - -func (c *Controller) hasLastRunFinished(app *v1beta2.SparkApplication) bool { - return app.Status.AppState.State == v1beta2.CompletedState || - app.Status.AppState.State == v1beta2.FailedState -} - -func (c *Controller) killLastRunIfNotFinished(app *v1beta2.SparkApplication) error { - finished := c.hasLastRunFinished(app) - if finished { - return nil - } - - // Delete the SparkApplication object of the last run. - if err := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete( - context.TODO(), - app.Name, - metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}, - ); err != nil { - return err - } - - return nil -} - -func (c *Controller) checkAndUpdatePastRuns( - app *v1beta2.ScheduledSparkApplication, - status *v1beta2.ScheduledSparkApplicationStatus) error { - sortedApps, err := c.listSparkApplications(app) - if err != nil { - return err - } - - var completedRuns []string - var failedRuns []string - for _, a := range sortedApps { - if a.Status.AppState.State == v1beta2.CompletedState { - completedRuns = append(completedRuns, a.Name) - } else if a.Status.AppState.State == v1beta2.FailedState { - failedRuns = append(failedRuns, a.Name) - } - } - - var toDelete []string - status.PastSuccessfulRunNames, toDelete = bookkeepPastRuns(completedRuns, app.Spec.SuccessfulRunHistoryLimit) - for _, name := range toDelete { - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) - } - status.PastFailedRunNames, toDelete = bookkeepPastRuns(failedRuns, app.Spec.FailedRunHistoryLimit) - for _, name := range toDelete { - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) - } - - return nil -} - -func (c *Controller) updateScheduledSparkApplicationStatus( - app *v1beta2.ScheduledSparkApplication, - newStatus *v1beta2.ScheduledSparkApplicationStatus) error { - // If the status has not changed, do not perform an update. 
- if isStatusEqual(newStatus, &app.Status) { - return nil - } - - toUpdate := app.DeepCopy() - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - toUpdate.Status = *newStatus - _, updateErr := c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(toUpdate.Namespace).UpdateStatus( - context.TODO(), - toUpdate, - metav1.UpdateOptions{}, - ) - if updateErr == nil { - return nil - } - - result, err := c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(toUpdate.Namespace).Get( - context.TODO(), - toUpdate.Name, - metav1.GetOptions{}, - ) - if err != nil { - return err - } - toUpdate = result - - return updateErr - }) -} - -func (c *Controller) listSparkApplications(app *v1beta2.ScheduledSparkApplication) (sparkApps, error) { - set := labels.Set{config.ScheduledSparkAppNameLabel: app.Name} - apps, err := c.saLister.SparkApplications(app.Namespace).List(set.AsSelector()) - if err != nil { - return nil, fmt.Errorf("failed to list SparkApplications: %v", err) - } - sortedApps := sparkApps(apps) - sort.Sort(sortedApps) - return sortedApps, nil -} - -func bookkeepPastRuns(names []string, runLimit *int32) (toKeep []string, toDelete []string) { - limit := 1 - if runLimit != nil { - limit = int(*runLimit) - } - - if len(names) <= limit { - return names, nil - } - toKeep = names[:limit] - toDelete = names[limit:] - return -} - -func isStatusEqual(newStatus, currentStatus *v1beta2.ScheduledSparkApplicationStatus) bool { - return newStatus.ScheduleState == currentStatus.ScheduleState && - newStatus.LastRun == currentStatus.LastRun && - newStatus.NextRun == currentStatus.NextRun && - newStatus.LastRunName == currentStatus.LastRunName && - reflect.DeepEqual(newStatus.PastSuccessfulRunNames, currentStatus.PastSuccessfulRunNames) && - reflect.DeepEqual(newStatus.PastFailedRunNames, currentStatus.PastFailedRunNames) && - newStatus.Reason == currentStatus.Reason -} - -func int64ptr(n int64) *int64 { - return &n -} diff --git a/pkg/controller/scheduledsparkapplication/controller_test.go b/pkg/controller/scheduledsparkapplication/controller_test.go deleted file mode 100644 index 9ef610113..000000000 --- a/pkg/controller/scheduledsparkapplication/controller_test.go +++ /dev/null @@ -1,552 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package scheduledsparkapplication - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - kubeclientfake "k8s.io/client-go/kubernetes/fake" - kubetesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" - clocktesting "k8s.io/utils/clock/testing" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - "github.com/kubeflow/spark-operator/pkg/config" -) - -func TestSyncScheduledSparkApplication_Allow(t *testing.T) { - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app-allow", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 10m", - ConcurrencyPolicy: v1beta2.ConcurrencyAllow, - }, - } - c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - - key, _ := cache.MetaNamespaceKeyFunc(app) - options := metav1.GetOptions{} - - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - // The first run should not have been triggered. - assert.True(t, app.Status.LastRunName == "") - - // Advance the clock by 10 minutes. - clk.Step(10 * time.Minute) - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - firstRunName := app.Status.LastRunName - // The first run should have been triggered. - assert.True(t, firstRunName != "") - assert.False(t, app.Status.LastRun.IsZero()) - assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) - // The first run exists. - run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - clk.Step(5 * time.Second) - // The second sync should not start any new run. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - // Next run is not due, so LastRunName should stay the same. - assert.Equal(t, firstRunName, app.Status.LastRunName) - - // Simulate completion of the first run. - run.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run, metav1.UpdateOptions{}) - // This sync should not start any new run, but update Status.PastSuccessfulRunNames. 
- if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - // This sync should not start any new run, nor update Status.PastSuccessfulRunNames. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - // Advance the clock to trigger the second run. - clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second)) - // This sync should start the second run. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - // The second run should have a different name. - secondRunName := app.Status.LastRunName - assert.NotEqual(t, firstRunName, secondRunName) - assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) - // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), secondRunName, options) - assert.NotNil(t, run) - - // Simulate completion of the second run. - run.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run, metav1.UpdateOptions{}) - // This sync should not start any new run, but update Status.PastSuccessfulRunNames. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - // The first run should have been deleted due to the completion of the second run. - firstRun, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.Nil(t, firstRun) - - // This sync should not start any new run, nor update Status.PastSuccessfulRunNames. 
- if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), secondRunName, options) - assert.NotNil(t, run) - - // Test the case where we update the schedule to be more frequent - app.Spec.Schedule = "@every 2m" - recentRunName := app.Status.LastRunName - recentRunTime := app.Status.LastRun.Time - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Update(context.TODO(), app, metav1.UpdateOptions{}) - // sync our update - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - // Advance the clock by 3 minutes. - clk.Step(3 * time.Minute) - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - // A run should have been triggered - assert.NotEqual(t, recentRunName, app.Status.LastRunName) - assert.True(t, recentRunTime.Before(app.Status.LastRun.Time)) - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Status.LastRunName, options) - assert.NotNil(t, run) - // Simulate completion of the last run - run.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run, metav1.UpdateOptions{}) - // This sync should not start any new run, but update Status.PastSuccessfulRunNames. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } -} - -func TestSyncScheduledSparkApplication_Forbid(t *testing.T) { - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app-forbid", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 1m", - ConcurrencyPolicy: v1beta2.ConcurrencyForbid, - }, - } - c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - - key, _ := cache.MetaNamespaceKeyFunc(app) - options := metav1.GetOptions{} - - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - // The first run should not have been triggered. - assert.True(t, app.Status.LastRunName == "") - - // Advance the clock by 1 minute. - clk.Step(1 * time.Minute) - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - firstRunName := app.Status.LastRunName - // The first run should have been triggered. - assert.True(t, firstRunName != "") - assert.False(t, app.Status.LastRun.IsZero()) - assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) - // The first run exists. 
- run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second)) - // This sync should not start the next run because the first run has not completed yet. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, firstRunName, app.Status.LastRunName) - - // Simulate completion of the first run. - run.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run, metav1.UpdateOptions{}) - // This sync should start the next run because the first run has completed. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - secondRunName := app.Status.LastRunName - assert.NotEqual(t, firstRunName, secondRunName) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) - // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), secondRunName, options) - assert.NotNil(t, run) -} - -func TestSyncScheduledSparkApplication_Replace(t *testing.T) { - // TODO: figure out why the test fails and remove this. - t.Skip() - - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app-replace", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 1m", - ConcurrencyPolicy: v1beta2.ConcurrencyReplace, - }, - } - c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - key, _ := cache.MetaNamespaceKeyFunc(app) - - options := metav1.GetOptions{} - - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - // The first run should not have been triggered. - assert.True(t, app.Status.LastRunName == "") - - // Advance the clock by 1 minute. - clk.Step(1 * time.Minute) - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - firstRunName := app.Status.LastRunName - // The first run should have been triggered. - assert.True(t, firstRunName != "") - assert.False(t, app.Status.LastRun.IsZero()) - assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) - // The first run exists. - run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second)) - // This sync should replace the first run with a new run. 
- if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - secondRunName := app.Status.LastRunName - assert.NotEqual(t, firstRunName, secondRunName) - // The first run should have been deleted. - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.Nil(t, run) - // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), secondRunName, options) - assert.NotNil(t, run) -} - -func TestShouldStartNextRun(t *testing.T) { - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 1m", - }, - Status: v1beta2.ScheduledSparkApplicationStatus{ - LastRunName: "run1", - }, - } - c, _ := newFakeController() - c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - - run1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: app.Namespace, - Name: "run1", - Labels: map[string]string{config.ScheduledSparkAppNameLabel: app.Name}, - }, - } - c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Create(context.TODO(), run1, metav1.CreateOptions{}) - - // ConcurrencyAllow with a running run. - run1.Status.AppState.State = v1beta2.RunningState - c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(context.TODO(), run1, metav1.UpdateOptions{}) - app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyAllow - ok, _ := c.shouldStartNextRun(app) - assert.True(t, ok) - - // ConcurrencyForbid with a running run. - app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyForbid - ok, _ = c.shouldStartNextRun(app) - assert.False(t, ok) - // ConcurrencyForbid with a completed run. - run1.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(context.TODO(), run1, metav1.UpdateOptions{}) - ok, _ = c.shouldStartNextRun(app) - assert.True(t, ok) - - // ConcurrencyReplace with a completed run. - app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyReplace - ok, _ = c.shouldStartNextRun(app) - assert.True(t, ok) - // ConcurrencyReplace with a running run. - run1.Status.AppState.State = v1beta2.RunningState - c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(context.TODO(), run1, metav1.UpdateOptions{}) - ok, _ = c.shouldStartNextRun(app) - assert.True(t, ok) - // The previous running run should have been deleted. 
- existing, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Get( - context.TODO(), - run1.Name, - metav1.GetOptions{}, - ) - assert.Nil(t, existing) -} - -func TestCheckAndUpdatePastRuns(t *testing.T) { - var two int32 = 2 - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 1m", - SuccessfulRunHistoryLimit: &two, - FailedRunHistoryLimit: &two, - }, - } - c, _ := newFakeController() - - run1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: app.Namespace, - Name: "run1", - Labels: map[string]string{config.ScheduledSparkAppNameLabel: app.Name}, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.CompletedState, - }, - }, - } - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run1, metav1.CreateOptions{}) - - // The first completed run should have been recorded. - status := app.Status.DeepCopy() - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 1, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[0]) - - // The second run that is running should not be recorded. - run2 := run1.DeepCopy() - run2.CreationTimestamp.Time = run1.CreationTimestamp.Add(10 * time.Second) - run2.Name = "run2" - run2.Status.AppState.State = v1beta2.RunningState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run2, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 1, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[0]) - // The second completed run should have been recorded. - run2.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run2, metav1.UpdateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[0]) - assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[1]) - // The second completed run has already been recorded, so should not be recorded again. - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[0]) - assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[1]) - // SparkApplications of both of the first two completed runs should exist. - existing, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run2.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run1.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - - // The third completed run should have been recorded. 
- run3 := run1.DeepCopy() - run3.CreationTimestamp.Time = run2.CreationTimestamp.Add(10 * time.Second) - run3.Name = "run3" - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run3, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run3.Name, status.PastSuccessfulRunNames[0]) - assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[1]) - // SparkApplications of the last two completed runs should still exist, - // but the one of the first completed run should have been deleted. - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run3.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run2.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run1.Name, - metav1.GetOptions{}, - ) - assert.Nil(t, existing) - - // The first failed run should have been recorded. - run4 := run1.DeepCopy() - run4.CreationTimestamp.Time = run3.CreationTimestamp.Add(10 * time.Second) - run4.Name = "run4" - run4.Status.AppState.State = v1beta2.FailedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run4, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 1, len(status.PastFailedRunNames)) - assert.Equal(t, run4.Name, status.PastFailedRunNames[0]) - - // The second failed run should have been recorded. - run5 := run1.DeepCopy() - run5.CreationTimestamp.Time = run4.CreationTimestamp.Add(10 * time.Second) - run5.Name = "run5" - run5.Status.AppState.State = v1beta2.FailedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run5, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastFailedRunNames)) - assert.Equal(t, run5.Name, status.PastFailedRunNames[0]) - assert.Equal(t, run4.Name, status.PastFailedRunNames[1]) - - // The third failed run should have been recorded. - run6 := run1.DeepCopy() - run6.CreationTimestamp.Time = run5.CreationTimestamp.Add(10 * time.Second) - run6.Name = "run6" - run6.Status.AppState.State = v1beta2.FailedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run6, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastFailedRunNames)) - assert.Equal(t, run6.Name, status.PastFailedRunNames[0]) - assert.Equal(t, run5.Name, status.PastFailedRunNames[1]) - // SparkApplications of the last two failed runs should still exist, - // but the one of the first failed run should have been deleted. 
- existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run6.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run5.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run4.Name, - metav1.GetOptions{}, - ) - assert.Nil(t, existing) -} - -func newFakeController() (*Controller, *clocktesting.FakeClock) { - crdClient := crdclientfake.NewSimpleClientset() - kubeClient := kubeclientfake.NewSimpleClientset() - apiExtensionsClient := apiextensionsfake.NewSimpleClientset() - informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 1*time.Second) - clk := clocktesting.NewFakeClock(time.Now()) - controller := NewController(crdClient, kubeClient, apiExtensionsClient, informerFactory, clk) - ssaInformer := informerFactory.Sparkoperator().V1beta2().ScheduledSparkApplications().Informer() - saInformer := informerFactory.Sparkoperator().V1beta2().SparkApplications().Informer() - crdClient.PrependReactor("create", "scheduledsparkapplications", - func(action kubetesting.Action) (bool, runtime.Object, error) { - obj := action.(kubetesting.CreateAction).GetObject() - ssaInformer.GetStore().Add(obj) - return false, obj, nil - }) - crdClient.PrependReactor("update", "scheduledsparkapplications", - func(action kubetesting.Action) (bool, runtime.Object, error) { - obj := action.(kubetesting.UpdateAction).GetObject() - ssaInformer.GetStore().Update(obj) - return false, obj, nil - }) - crdClient.PrependReactor("create", "sparkapplications", - func(action kubetesting.Action) (bool, runtime.Object, error) { - obj := action.(kubetesting.CreateAction).GetObject() - saInformer.GetStore().Add(obj) - return false, obj, nil - }) - crdClient.PrependReactor("update", "sparkapplications", - func(action kubetesting.Action) (bool, runtime.Object, error) { - obj := action.(kubetesting.UpdateAction).GetObject() - saInformer.GetStore().Update(obj) - return false, obj, nil - }) - return controller, clk -} diff --git a/pkg/controller/sparkapplication/controller.go b/pkg/controller/sparkapplication/controller.go deleted file mode 100644 index 3e9b373b8..000000000 --- a/pkg/controller/sparkapplication/controller.go +++ /dev/null @@ -1,1130 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "os/exec" - "time" - - "github.com/golang/glog" - "github.com/google/uuid" - "golang.org/x/time/rate" - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - v1 "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/client-go/util/workqueue" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/batchscheduler" - schedulerinterface "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" - crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" - crdscheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -const ( - sparkExecutorIDLabel = "spark-exec-id" - podAlreadyExistsErrorCode = "code=409" - queueTokenRefillRate = 50 - queueTokenBucketSize = 500 -) - -var ( - keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc - execCommand = exec.Command -) - -// Controller manages instances of SparkApplication. -type Controller struct { - crdClient crdclientset.Interface - kubeClient clientset.Interface - queue workqueue.RateLimitingInterface - cacheSynced cache.InformerSynced - recorder record.EventRecorder - metrics *sparkAppMetrics - applicationLister crdlisters.SparkApplicationLister - podLister v1.PodLister - ingressURLFormat string - ingressClassName string - batchSchedulerMgr *batchscheduler.SchedulerManager - enableUIService bool -} - -// NewController creates a new Controller. 
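- // An illustrative wiring sketch (editor's note; the argument values are - // hypothetical, see the parameter list below): - // - //	crdInformers := crdinformers.NewSharedInformerFactory(crdClient, 30*time.Second) - //	podInformers := informers.NewSharedInformerFactory(kubeClient, 30*time.Second) - //	ctrl := NewController(crdClient, kubeClient, crdInformers, podInformers, - //		nil /* metricsConfig */, "spark-jobs" /* namespace */, "" /* ingressURLFormat */, - //		"" /* ingressClassName */, nil /* batchSchedulerMgr */, true /* enableUIService */)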
-func NewController( - crdClient crdclientset.Interface, - kubeClient clientset.Interface, - crdInformerFactory crdinformers.SharedInformerFactory, - podInformerFactory informers.SharedInformerFactory, - metricsConfig *util.MetricConfig, - namespace string, - ingressURLFormat string, - ingressClassName string, - batchSchedulerMgr *batchscheduler.SchedulerManager, - enableUIService bool) *Controller { - crdscheme.AddToScheme(scheme.Scheme) - - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.V(2).Infof) - eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{ - Interface: kubeClient.CoreV1().Events(namespace), - }) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: "spark-operator"}) - - return newSparkApplicationController(crdClient, kubeClient, crdInformerFactory, podInformerFactory, recorder, metricsConfig, ingressURLFormat, ingressClassName, batchSchedulerMgr, enableUIService) -} - -func newSparkApplicationController( - crdClient crdclientset.Interface, - kubeClient clientset.Interface, - crdInformerFactory crdinformers.SharedInformerFactory, - podInformerFactory informers.SharedInformerFactory, - eventRecorder record.EventRecorder, - metricsConfig *util.MetricConfig, - ingressURLFormat string, - ingressClassName string, - batchSchedulerMgr *batchscheduler.SchedulerManager, - enableUIService bool) *Controller { - queue := workqueue.NewNamedRateLimitingQueue(&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(queueTokenRefillRate), queueTokenBucketSize)}, - "spark-application-controller") - - controller := &Controller{ - crdClient: crdClient, - kubeClient: kubeClient, - recorder: eventRecorder, - queue: queue, - ingressURLFormat: ingressURLFormat, - ingressClassName: ingressClassName, - batchSchedulerMgr: batchSchedulerMgr, - enableUIService: enableUIService, - } - - if metricsConfig != nil { - controller.metrics = newSparkAppMetrics(metricsConfig) - controller.metrics.registerMetrics() - } - - crdInformer := crdInformerFactory.Sparkoperator().V1beta2().SparkApplications() - crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.onAdd, - UpdateFunc: controller.onUpdate, - DeleteFunc: controller.onDelete, - }) - controller.applicationLister = crdInformer.Lister() - - podsInformer := podInformerFactory.Core().V1().Pods() - sparkPodEventHandler := newSparkPodEventHandler(controller.queue.AddRateLimited, controller.applicationLister) - podsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: sparkPodEventHandler.onPodAdded, - UpdateFunc: sparkPodEventHandler.onPodUpdated, - DeleteFunc: sparkPodEventHandler.onPodDeleted, - }) - controller.podLister = podsInformer.Lister() - - controller.cacheSynced = func() bool { - return crdInformer.Informer().HasSynced() && podsInformer.Informer().HasSynced() - } - - return controller -} - -// Start starts the Controller by registering a watcher for SparkApplication objects. -func (c *Controller) Start(workers int, stopCh <-chan struct{}) error { - // Wait for all involved caches to be synced, before processing items from the queue is started. - if !cache.WaitForCacheSync(stopCh, c.cacheSynced) { - return fmt.Errorf("timed out waiting for cache to sync") - } - - glog.Info("Starting the workers of the SparkApplication controller") - for i := 0; i < workers; i++ { - // runWorker will loop until "something bad" happens. Until will then rekick - // the worker after one second. 
- go wait.Until(c.runWorker, time.Second, stopCh) - } - - return nil -} - -// Stop stops the controller. -func (c *Controller) Stop() { - glog.Info("Stopping the SparkApplication controller") - c.queue.ShutDown() -} - -// Callback function called when a new SparkApplication object gets created. -func (c *Controller) onAdd(obj interface{}) { - app := obj.(*v1beta2.SparkApplication) - glog.Infof("SparkApplication %s/%s was added, enqueuing it for submission", app.Namespace, app.Name) - c.enqueue(app) -} - -func (c *Controller) onUpdate(oldObj, newObj interface{}) { - oldApp := oldObj.(*v1beta2.SparkApplication) - newApp := newObj.(*v1beta2.SparkApplication) - - // The informer will call this function on non-updated resources during resync, avoid - // enqueuing unchanged applications, unless it has expired or is subject to retry. - if oldApp.ResourceVersion == newApp.ResourceVersion && !c.hasApplicationExpired(newApp) && !shouldRetry(newApp) { - return - } - - // The spec has changed. This is currently best effort as we can potentially miss updates - // and end up in an inconsistent state. - if !equality.Semantic.DeepEqual(oldApp.Spec, newApp.Spec) { - // Force-set the application status to Invalidating which handles clean-up and application re-run. - if _, err := c.updateApplicationStatusWithRetries(newApp, func(status *v1beta2.SparkApplicationStatus) { - status.AppState.State = v1beta2.InvalidatingState - }); err != nil { - c.recorder.Eventf( - newApp, - apiv1.EventTypeWarning, - "SparkApplicationSpecUpdateFailed", - "failed to process spec update for SparkApplication %s: %v", - newApp.Name, - err) - return - } - - c.recorder.Eventf( - newApp, - apiv1.EventTypeNormal, - "SparkApplicationSpecUpdateProcessed", - "Successfully processed spec update for SparkApplication %s", - newApp.Name) - } - - glog.V(2).Infof("SparkApplication %s/%s was updated, enqueuing it", newApp.Namespace, newApp.Name) - c.enqueue(newApp) -} - -func (c *Controller) onDelete(obj interface{}) { - var app *v1beta2.SparkApplication - switch obj.(type) { - case *v1beta2.SparkApplication: - app = obj.(*v1beta2.SparkApplication) - case cache.DeletedFinalStateUnknown: - deletedObj := obj.(cache.DeletedFinalStateUnknown).Obj - app = deletedObj.(*v1beta2.SparkApplication) - } - - if app != nil { - c.handleSparkApplicationDeletion(app) - c.recorder.Eventf( - app, - apiv1.EventTypeNormal, - "SparkApplicationDeleted", - "SparkApplication %s was deleted", - app.Name) - } -} - -// runWorker runs a single controller worker. -func (c *Controller) runWorker() { - defer utilruntime.HandleCrash() - for c.processNextItem() { - } -} - -func (c *Controller) processNextItem() bool { - key, quit := c.queue.Get() - - if quit { - return false - } - defer c.queue.Done(key) - - glog.V(2).Infof("Starting processing key: %q", key) - defer glog.V(2).Infof("Ending processing key: %q", key) - err := c.syncSparkApplication(key.(string)) - if err == nil { - // Successfully processed the key or the key was not found so tell the queue to stop tracking - // history for your key. This will reset things like failure counts for per-item rate limiting. - c.queue.Forget(key) - return true - } - - // There was a failure so be sure to report it. 
This method allows for pluggable error handling - // which can be used for things like cluster-monitoring - utilruntime.HandleError(fmt.Errorf("failed to sync SparkApplication %q: %v", key, err)) - return true -} - -func (c *Controller) getExecutorPods(app *v1beta2.SparkApplication) ([]*apiv1.Pod, error) { - matchLabels := getResourceLabels(app) - matchLabels[config.SparkRoleLabel] = config.SparkExecutorRole - // Fetch all the executor pods for the current run of the application. - selector := labels.SelectorFromSet(labels.Set(matchLabels)) - pods, err := c.podLister.Pods(app.Namespace).List(selector) - if err != nil { - return nil, fmt.Errorf("failed to get pods for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - } - return pods, nil -} - -func (c *Controller) getDriverPod(app *v1beta2.SparkApplication) (*apiv1.Pod, error) { - pod, err := c.podLister.Pods(app.Namespace).Get(app.Status.DriverInfo.PodName) - if err == nil { - return pod, nil - } - if !errors.IsNotFound(err) { - return nil, fmt.Errorf("failed to get driver pod %s: %v", app.Status.DriverInfo.PodName, err) - } - - // The driver pod was not found in the informer cache, try getting it directly from the API server. - pod, err = c.kubeClient.CoreV1().Pods(app.Namespace).Get(context.TODO(), app.Status.DriverInfo.PodName, metav1.GetOptions{}) - if err == nil { - return pod, nil - } - if !errors.IsNotFound(err) { - return nil, fmt.Errorf("failed to get driver pod %s: %v", app.Status.DriverInfo.PodName, err) - } - // Driver pod was not found on the API server either. - return nil, nil -} - -// getAndUpdateDriverState finds the driver pod of the application -// and updates the driver state based on the current phase of the pod. -func (c *Controller) getAndUpdateDriverState(app *v1beta2.SparkApplication) error { - // Either the driver pod doesn't exist yet or its name has not been updated. - if app.Status.DriverInfo.PodName == "" { - return fmt.Errorf("empty driver pod name with application state %s", app.Status.AppState.State) - } - - driverPod, err := c.getDriverPod(app) - if err != nil { - return err - } - - if driverPod == nil { - app.Status.AppState.ErrorMessage = "driver pod not found" - app.Status.AppState.State = v1beta2.FailingState - app.Status.TerminationTime = metav1.Now() - return nil - } - - app.Status.SparkApplicationID = getSparkApplicationID(driverPod) - driverState := podStatusToDriverState(driverPod.Status) - - if hasDriverTerminated(driverState) { - if app.Status.TerminationTime.IsZero() { - app.Status.TerminationTime = metav1.Now() - } - if driverState == v1beta2.DriverFailedState { - state := getDriverContainerTerminatedState(driverPod.Status) - if state != nil { - if state.ExitCode != 0 { - app.Status.AppState.ErrorMessage = fmt.Sprintf("driver container failed with ExitCode: %d, Reason: %s", state.ExitCode, state.Reason) - } - } else { - app.Status.AppState.ErrorMessage = "driver container status missing" - } - } - } - - newState := driverStateToApplicationState(driverState) - // Only record a driver event if the application state (derived from the driver pod phase) has changed. - if newState != app.Status.AppState.State { - c.recordDriverEvent(app, driverState, driverPod.Name) - app.Status.AppState.State = newState - } - - return nil -} - -// getAndUpdateExecutorState lists the executor pods of the application -// and updates the executor state based on the current phase of the pods. 
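- // For example (editor's sketch of the mapping assumed below): - // - //	state := podPhaseToExecutorState(apiv1.PodRunning) // presumably yields v1beta2.ExecutorRunningState - // - // while an executor pod that disappears before terminating, with the driver - // still running, is recorded as ExecutorUnknownState.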
-func (c *Controller) getAndUpdateExecutorState(app *v1beta2.SparkApplication) error { - pods, err := c.getExecutorPods(app) - if err != nil { - return err - } - - executorStateMap := make(map[string]v1beta2.ExecutorState) - var executorApplicationID string - for _, pod := range pods { - if util.IsExecutorPod(pod) { - newState := podPhaseToExecutorState(pod.Status.Phase) - oldState, exists := app.Status.ExecutorState[pod.Name] - // Only record an executor event if the executor state is new or it has changed. - if !exists || newState != oldState { - if newState == v1beta2.ExecutorFailedState { - execContainerState := getExecutorContainerTerminatedState(pod.Status) - if execContainerState != nil { - c.recordExecutorEvent(app, newState, pod.Name, execContainerState.ExitCode, execContainerState.Reason) - } else { - // If we can't find the container state, - // we need to set the exitCode and the Reason to unambiguous values. - c.recordExecutorEvent(app, newState, pod.Name, -1, "Unknown (Container not Found)") - } - } else { - c.recordExecutorEvent(app, newState, pod.Name) - } - } - executorStateMap[pod.Name] = newState - - if executorApplicationID == "" { - executorApplicationID = getSparkApplicationID(pod) - } - } - } - - // ApplicationID label can be different on driver/executors. Prefer executor ApplicationID if set. - // Refer https://issues.apache.org/jira/projects/SPARK/issues/SPARK-25922 for details. - if executorApplicationID != "" { - app.Status.SparkApplicationID = executorApplicationID - } - - if app.Status.ExecutorState == nil { - app.Status.ExecutorState = make(map[string]v1beta2.ExecutorState) - } - for name, execStatus := range executorStateMap { - app.Status.ExecutorState[name] = execStatus - } - - // Handle missing/deleted executors. - for name, oldStatus := range app.Status.ExecutorState { - _, exists := executorStateMap[name] - if !isExecutorTerminated(oldStatus) && !exists { - if !isDriverRunning(app) { - // If the application state is COMPLETED, the driver pod has completed successfully. - // The executor pods have then terminated and been cleaned up, so we cannot find - // the executor pod; in this circumstance, we assume the executor pods completed as well. - if app.Status.AppState.State == v1beta2.CompletedState { - app.Status.ExecutorState[name] = v1beta2.ExecutorCompletedState - } else { - glog.Infof("Executor pod %s not found, assuming it was deleted.", name) - app.Status.ExecutorState[name] = v1beta2.ExecutorFailedState - } - } else { - app.Status.ExecutorState[name] = v1beta2.ExecutorUnknownState - } - } - } - - return nil -} - -func (c *Controller) getAndUpdateAppState(app *v1beta2.SparkApplication) error { - if err := c.getAndUpdateDriverState(app); err != nil { - return err - } - if err := c.getAndUpdateExecutorState(app); err != nil { - return err - } - return nil -} - -func (c *Controller) handleSparkApplicationDeletion(app *v1beta2.SparkApplication) { - if c.metrics != nil { - c.metrics.exportMetricsOnDelete(app) - } - // SparkApplication deletion was requested, so let's delete the driver pod. - if err := c.deleteSparkResources(app); err != nil { - glog.Errorf("failed to delete resources associated with deleted SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - } -} - -// shouldRetry determines whether a SparkApplication in a given state should be retried.
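- // A worked example (editor's note, hypothetical values): with - // restartPolicy.type set to OnFailure and onFailureRetries set to 3, a failing - // application keeps being retried while status.executionAttempts <= 3, per the - // comparison below.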
-func shouldRetry(app *v1beta2.SparkApplication) bool { - switch app.Status.AppState.State { - case v1beta2.SucceedingState: - return app.Spec.RestartPolicy.Type == v1beta2.Always - case v1beta2.FailingState: - if app.Spec.RestartPolicy.Type == v1beta2.Always { - return true - } else if app.Spec.RestartPolicy.Type == v1beta2.OnFailure { - // We retry if we haven't hit the retry limit. - if app.Spec.RestartPolicy.OnFailureRetries != nil && app.Status.ExecutionAttempts <= *app.Spec.RestartPolicy.OnFailureRetries { - return true - } - } - case v1beta2.FailedSubmissionState: - if app.Spec.RestartPolicy.Type == v1beta2.Always { - return true - } else if app.Spec.RestartPolicy.Type == v1beta2.OnFailure { - // We retry if we haven't hit the retry limit. - if app.Spec.RestartPolicy.OnSubmissionFailureRetries != nil && app.Status.SubmissionAttempts <= *app.Spec.RestartPolicy.OnSubmissionFailureRetries { - return true - } - } - } - return false -} - -// State Machine for SparkApplication: -// +--------------------------------------------------------------------------------------------------------------------+ -// | +---------------------------------------------------------------------------------------------+ | -// | | +----------+ | | -// | | | | | | -// | | | | | | -// | | |Submission| | | -// | | +----> Failed +----+------------------------------------------------------------------+ | | -// | | | | | | | | | -// | | | | | | | | | -// | | | +----^-----+ | +-----------------------------------------+ | | | -// | | | | | | | | | | -// | | | | | | | | | | -// | +-+--+----+ | +-----v--+-+ +----------+ +-----v-----+ +----v--v--+ | -// | | | | | | | | | | | | | -// | | | | | | | | | | | | | -// | | New +---------> Submitted+----------> Running +-----------> Failing +----------> Failed | | -// | | | | | | | | | | | | | -// | | | | | | | | | | | | | -// | | | | | | | | | | | | | -// | +---------+ | +----^-----+ +-----+----+ +-----+-----+ +----------+ | -// | | | | | | -// | | | | | | -// | +------------+ | | +-------------------------------+ | -// | | | | +-----+-----+ | | +-----------+ +----------+ | -// | | | | | Pending | | | | | | | | -// | | | +---+ Rerun <-------+ +---------------->Succeeding +---------->Completed | | -// | |Invalidating| | <-------+ | | | | | -// | | +-------> | | | | | | | -// | | | | | | | | | | | -// | | | +-----------+ | +-----+-----+ +----------+ | -// | +------------+ | | | -// | | | | -// | +-------------------------------+ | -// | | -// +--------------------------------------------------------------------------------------------------------------------+ -func (c *Controller) syncSparkApplication(key string) error { - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return fmt.Errorf("failed to get the namespace and name from key %s: %v", key, err) - } - app, err := c.getSparkApplication(namespace, name) - if err != nil { - return err - } - if app == nil { - // SparkApplication not found. - return nil - } - if !app.DeletionTimestamp.IsZero() { - c.handleSparkApplicationDeletion(app) - return nil - } - - appCopy := app.DeepCopy() - // Apply the default values to the copy. Note that the default values applied - // won't be sent to the API server as we only update the /status subresource. - v1beta2.SetSparkApplicationDefaults(appCopy) - - // Take action based on application state. 
- switch appCopy.Status.AppState.State { - case v1beta2.NewState: - c.recordSparkApplicationEvent(appCopy) - if err := c.validateSparkApplication(appCopy); err != nil { - appCopy.Status.AppState.State = v1beta2.FailedState - appCopy.Status.AppState.ErrorMessage = err.Error() - } else { - appCopy = c.submitSparkApplication(appCopy) - } - case v1beta2.SucceedingState: - if !shouldRetry(appCopy) { - appCopy.Status.AppState.State = v1beta2.CompletedState - c.recordSparkApplicationEvent(appCopy) - } else { - if err := c.deleteSparkResources(appCopy); err != nil { - glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v", - appCopy.Namespace, appCopy.Name, err) - return err - } - appCopy.Status.AppState.State = v1beta2.PendingRerunState - } - case v1beta2.FailingState: - if !shouldRetry(appCopy) { - appCopy.Status.AppState.State = v1beta2.FailedState - c.recordSparkApplicationEvent(appCopy) - } else if isNextRetryDue(appCopy.Spec.RestartPolicy.OnFailureRetryInterval, appCopy.Status.ExecutionAttempts, appCopy.Status.TerminationTime) { - if err := c.deleteSparkResources(appCopy); err != nil { - glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v", - appCopy.Namespace, appCopy.Name, err) - return err - } - appCopy.Status.AppState.State = v1beta2.PendingRerunState - } - case v1beta2.FailedSubmissionState: - if !shouldRetry(appCopy) { - // App will never be retried. Move to terminal FailedState. - appCopy.Status.AppState.State = v1beta2.FailedState - c.recordSparkApplicationEvent(appCopy) - } else if isNextRetryDue(appCopy.Spec.RestartPolicy.OnSubmissionFailureRetryInterval, appCopy.Status.SubmissionAttempts, appCopy.Status.LastSubmissionAttemptTime) { - if c.validateSparkResourceDeletion(appCopy) { - c.submitSparkApplication(appCopy) - } else { - if err := c.deleteSparkResources(appCopy); err != nil { - glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v", - appCopy.Namespace, appCopy.Name, err) - return err - } - } - } - case v1beta2.InvalidatingState: - // Invalidate the current run and enqueue the SparkApplication for re-execution. 
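- // (Editor's note: this state is reached when onUpdate above observes a spec - // change and force-sets - // - //	status.AppState.State = v1beta2.InvalidatingState - // - // after which this branch tears down the old run and moves the application to - // PendingRerunState.)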
- if err := c.deleteSparkResources(appCopy); err != nil { - glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v", - appCopy.Namespace, appCopy.Name, err) - return err - } - c.clearStatus(&appCopy.Status) - appCopy.Status.AppState.State = v1beta2.PendingRerunState - case v1beta2.PendingRerunState: - glog.V(2).Infof("SparkApplication %s/%s is pending rerun", appCopy.Namespace, appCopy.Name) - if c.validateSparkResourceDeletion(appCopy) { - glog.V(2).Infof("Resources for SparkApplication %s/%s successfully deleted", appCopy.Namespace, appCopy.Name) - c.recordSparkApplicationEvent(appCopy) - c.clearStatus(&appCopy.Status) - appCopy = c.submitSparkApplication(appCopy) - } - case v1beta2.SubmittedState, v1beta2.RunningState, v1beta2.UnknownState: - if err := c.getAndUpdateAppState(appCopy); err != nil { - return err - } - case v1beta2.CompletedState, v1beta2.FailedState: - if c.hasApplicationExpired(app) { - glog.Infof("Garbage collecting expired SparkApplication %s/%s", app.Namespace, app.Name) - err := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(context.TODO(), app.Name, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) - if err != nil && !errors.IsNotFound(err) { - return err - } - return nil - } - if err := c.getAndUpdateExecutorState(appCopy); err != nil { - return err - } - } - - if appCopy != nil { - err = c.updateStatusAndExportMetrics(app, appCopy) - if err != nil { - glog.Errorf("failed to update SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - return err - } - - if state := appCopy.Status.AppState.State; state == v1beta2.CompletedState || - state == v1beta2.FailedState { - if err := c.cleanUpOnTermination(app, appCopy); err != nil { - glog.Errorf("failed to clean up resources for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - return err - } - } - } - - return nil -} - -// Helper func to determine whether the next retry of the SparkApplication is due now. -func isNextRetryDue(retryInterval *int64, attemptsDone int32, lastEventTime metav1.Time) bool { - if retryInterval == nil || lastEventTime.IsZero() || attemptsDone <= 0 { - return false - } - - // Retry if we have waited at least attempts*RetryInterval, since we do a linear back-off. - interval := time.Duration(*retryInterval) * time.Second * time.Duration(attemptsDone) - currentTime := time.Now() - glog.V(3).Infof("currentTime is %v, interval is %v", currentTime, interval) - if currentTime.After(lastEventTime.Add(interval)) { - return true - } - return false -} - -// submitSparkApplication creates a new submission for the given SparkApplication and submits it using spark-submit. -func (c *Controller) submitSparkApplication(app *v1beta2.SparkApplication) *v1beta2.SparkApplication { - if app.PrometheusMonitoringEnabled() { - if err := configPrometheusMonitoring(app, c.kubeClient); err != nil { - glog.Error(err) - } - } - - // Use the batch scheduler to perform the scheduling task before submitting (i.e., before building the command arguments).
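- // An application opts in via spec.batchScheduler (editor's sketch; the name - // must be one registered with the SchedulerManager, e.g. the Volcano - // integration): - // - //	name := "volcano" // assumed scheduler name - //	app.Spec.BatchScheduler = &name - // - // Otherwise shouldDoBatchScheduling below returns (false, nil) and submission - // proceeds directly.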
- if needScheduling, scheduler := c.shouldDoBatchScheduling(app); needScheduling { - err := scheduler.DoBatchSchedulingOnSubmission(app) - if err != nil { - glog.Errorf("failed to process batch scheduler BeforeSubmitSparkApplication with error %v", err) - return app - } - } - - driverInfo := v1beta2.DriverInfo{} - - if c.enableUIService { - service, err := createSparkUIService(app, c.kubeClient) - if err != nil { - glog.Errorf("failed to create UI service for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - } else { - driverInfo.WebUIServiceName = service.serviceName - driverInfo.WebUIPort = service.servicePort - // Use the port recorded just above; the port in app.Status may be stale or unset at this point. - driverInfo.WebUIAddress = fmt.Sprintf("%s:%d", service.serviceIP, driverInfo.WebUIPort) - // Create UI Ingress if ingress-format is set. - if c.ingressURLFormat != "" { - // We are going to want to use an ingress url. - ingressURL, err := getDriverIngressURL(c.ingressURLFormat, app.GetName(), app.GetNamespace()) - if err != nil { - glog.Errorf("failed to get the spark ingress url %s/%s: %v", app.Namespace, app.Name, err) - } else { - // Need to ensure the spark.ui variables are configured correctly if a subPath is used. - if ingressURL.Path != "" { - if app.Spec.SparkConf == nil { - app.Spec.SparkConf = make(map[string]string) - } - app.Spec.SparkConf["spark.ui.proxyBase"] = ingressURL.Path - app.Spec.SparkConf["spark.ui.proxyRedirectUri"] = "/" - } - ingress, err := createSparkUIIngress(app, *service, ingressURL, c.ingressClassName, c.kubeClient) - if err != nil { - glog.Errorf("failed to create UI Ingress for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - } else { - driverInfo.WebUIIngressAddress = ingress.ingressURL.String() - driverInfo.WebUIIngressName = ingress.ingressName - } - } - } - } - } - - for _, driverIngressConfiguration := range app.Spec.DriverIngressOptions { - service, err := createDriverIngressServiceFromConfiguration(app, &driverIngressConfiguration, c.kubeClient) - if err != nil { - glog.Errorf("failed to create driver ingress service for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - continue - } - glog.Infof("Created driver ingress service %s (port: %d) for SparkApplication %s/%s", service.serviceName, service.servicePort, app.Namespace, app.Name) - // Create ingress if ingress-format is set. - if driverIngressConfiguration.IngressURLFormat != "" { - // We are going to want to use an ingress url.
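- // (Editor's note: IngressURLFormat is assumed to follow the operator's - // ingress-url-format template convention, e.g. - // - //	{{$appName}}.ingress.example.com - // - // which getDriverIngressURL resolves to a concrete URL for this application.)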
- ingressURL, err := getDriverIngressURL(driverIngressConfiguration.IngressURLFormat, app.GetName(), app.GetNamespace()) - if err != nil { - glog.Errorf("failed to get the driver ingress url %s/%s: %v", app.Namespace, app.Name, err) - } else { - ingress, err := createDriverIngress(app, &driverIngressConfiguration, *service, ingressURL, c.ingressClassName, c.kubeClient) - if err != nil { - glog.Errorf("failed to create driver ingress for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - } else { - // Only log success (and dereference ingress) once creation actually succeeded. - glog.Infof("Created driver ingress %s (url: %s) for SparkApplication %s/%s", ingress.ingressName, ingress.ingressURL, app.Namespace, app.Name) - } - } - } - } - - driverPodName := getDriverPodName(app) - driverInfo.PodName = driverPodName - submissionID := uuid.New().String() - submissionCmdArgs, err := buildSubmissionCommandArgs(app, driverPodName, submissionID) - if err != nil { - app.Status = v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - ErrorMessage: err.Error(), - }, - SubmissionAttempts: app.Status.SubmissionAttempts + 1, - LastSubmissionAttemptTime: metav1.Now(), - } - return app - } - // Try submitting the application by running spark-submit. - submitted, err := runSparkSubmit(newSubmission(submissionCmdArgs, app)) - if err != nil { - app.Status = v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - ErrorMessage: err.Error(), - }, - SubmissionAttempts: app.Status.SubmissionAttempts + 1, - LastSubmissionAttemptTime: metav1.Now(), - } - c.recordSparkApplicationEvent(app) - glog.Errorf("failed to run spark-submit for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - return app - } - if !submitted { - // The application may not have been submitted even if err == nil, e.g., when some - // state update caused an attempt to re-submit the application, in which case no - // error gets returned from runSparkSubmit. If this is the case, we simply return.
- return app - } - - glog.Infof("SparkApplication %s/%s has been submitted", app.Namespace, app.Name) - app.Status = v1beta2.SparkApplicationStatus{ - SubmissionID: submissionID, - AppState: v1beta2.ApplicationState{ - State: v1beta2.SubmittedState, - }, - DriverInfo: driverInfo, - SubmissionAttempts: app.Status.SubmissionAttempts + 1, - ExecutionAttempts: app.Status.ExecutionAttempts + 1, - LastSubmissionAttemptTime: metav1.Now(), - } - c.recordSparkApplicationEvent(app) - - return app -} - -func (c *Controller) shouldDoBatchScheduling(app *v1beta2.SparkApplication) (bool, schedulerinterface.BatchScheduler) { - if c.batchSchedulerMgr == nil || app.Spec.BatchScheduler == nil || *app.Spec.BatchScheduler == "" { - return false, nil - } - - scheduler, err := c.batchSchedulerMgr.GetScheduler(*app.Spec.BatchScheduler) - if err != nil { - glog.Errorf("failed to get batch scheduler for name %s, %v", *app.Spec.BatchScheduler, err) - return false, nil - } - return scheduler.ShouldSchedule(app), scheduler -} - -func (c *Controller) updateApplicationStatusWithRetries( - original *v1beta2.SparkApplication, - updateFunc func(status *v1beta2.SparkApplicationStatus)) (*v1beta2.SparkApplication, error) { - toUpdate := original.DeepCopy() - updateErr := wait.ExponentialBackoff(retry.DefaultBackoff, func() (ok bool, err error) { - updateFunc(&toUpdate.Status) - if equality.Semantic.DeepEqual(original.Status, toUpdate.Status) { - return true, nil - } - - toUpdate, err = c.crdClient.SparkoperatorV1beta2().SparkApplications(original.Namespace).UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{}) - if err == nil { - return true, nil - } - if !errors.IsConflict(err) { - return false, err - } - - // There was a conflict updating the SparkApplication, fetch the latest version from the API server. - toUpdate, err = c.crdClient.SparkoperatorV1beta2().SparkApplications(original.Namespace).Get(context.TODO(), original.Name, metav1.GetOptions{}) - if err != nil { - glog.Errorf("failed to get SparkApplication %s/%s: %v", original.Namespace, original.Name, err) - return false, err - } - - // Retry with the latest version. - return false, nil - }) - - if updateErr != nil { - glog.Errorf("failed to update SparkApplication %s/%s: %v", original.Namespace, original.Name, updateErr) - return nil, updateErr - } - - return toUpdate, nil -} - -// updateStatusAndExportMetrics updates the status of the SparkApplication and exports the metrics. -func (c *Controller) updateStatusAndExportMetrics(oldApp, newApp *v1beta2.SparkApplication) error { - // Skip update if nothing changed. - if equality.Semantic.DeepEqual(oldApp.Status, newApp.Status) { - return nil - } - - oldStatusJSON, err := printStatus(&oldApp.Status) - if err != nil { - return err - } - newStatusJSON, err := printStatus(&newApp.Status) - if err != nil { - return err - } - - glog.V(2).Infof("Update the status of SparkApplication %s/%s from:\n%s\nto:\n%s", newApp.Namespace, newApp.Name, oldStatusJSON, newStatusJSON) - updatedApp, err := c.updateApplicationStatusWithRetries(oldApp, func(status *v1beta2.SparkApplicationStatus) { - *status = newApp.Status - }) - if err != nil { - return err - } - - // Export metrics if the update was successful.
-    // Export metrics if the update was successful.
-    if c.metrics != nil {
-        c.metrics.exportMetrics(oldApp, updatedApp)
-    }
-
-    return nil
-}
-
-func (c *Controller) getSparkApplication(namespace string, name string) (*v1beta2.SparkApplication, error) {
-    app, err := c.applicationLister.SparkApplications(namespace).Get(name)
-    if err != nil {
-        if errors.IsNotFound(err) {
-            return nil, nil
-        }
-        return nil, err
-    }
-    return app, nil
-}
-
-// Delete the driver pod and optional UI resources (Service/Ingress) created for the application.
-func (c *Controller) deleteSparkResources(app *v1beta2.SparkApplication) error {
-    driverPodName := app.Status.DriverInfo.PodName
-    // Derive the driver pod name in case the driver pod name was not recorded in the status,
-    // which could happen if the status update right after submission failed.
-    if driverPodName == "" {
-        driverPodName = getDriverPodName(app)
-    }
-
-    glog.V(2).Infof("Deleting pod %s in namespace %s", driverPodName, app.Namespace)
-    err := c.kubeClient.CoreV1().Pods(app.Namespace).Delete(context.TODO(), driverPodName, metav1.DeleteOptions{})
-    if err != nil && !errors.IsNotFound(err) {
-        return err
-    }
-
-    sparkUIServiceName := app.Status.DriverInfo.WebUIServiceName
-    if sparkUIServiceName != "" {
-        glog.V(2).Infof("Deleting Spark UI Service %s in namespace %s", sparkUIServiceName, app.Namespace)
-        err := c.kubeClient.CoreV1().Services(app.Namespace).Delete(context.TODO(), sparkUIServiceName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)})
-        if err != nil && !errors.IsNotFound(err) {
-            return err
-        }
-    }
-
-    sparkUIIngressName := app.Status.DriverInfo.WebUIIngressName
-    if sparkUIIngressName != "" {
-        if util.IngressCapabilities.Has("networking.k8s.io/v1") {
-            glog.V(2).Infof("Deleting Spark UI Ingress %s in namespace %s", sparkUIIngressName, app.Namespace)
-            err := c.kubeClient.NetworkingV1().Ingresses(app.Namespace).Delete(context.TODO(), sparkUIIngressName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)})
-            if err != nil && !errors.IsNotFound(err) {
-                return err
-            }
-        }
-        if util.IngressCapabilities.Has("extensions/v1beta1") {
-            glog.V(2).Infof("Deleting extensions/v1beta1 Spark UI Ingress %s in namespace %s", sparkUIIngressName, app.Namespace)
-            err := c.kubeClient.ExtensionsV1beta1().Ingresses(app.Namespace).Delete(context.TODO(), sparkUIIngressName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)})
-            if err != nil && !errors.IsNotFound(err) {
-                return err
-            }
-        }
-    }
-
-    return nil
-}
-
-func (c *Controller) validateSparkApplication(app *v1beta2.SparkApplication) error {
-    appSpec := app.Spec
-    driverSpec := appSpec.Driver
-    executorSpec := appSpec.Executor
-    if appSpec.NodeSelector != nil && (driverSpec.NodeSelector != nil || executorSpec.NodeSelector != nil) {
-        return fmt.Errorf("NodeSelector property can be defined at the SparkApplication level or at the Driver/Executor level, but not at both")
-    }
-
-    // Initialize the sets up front; assigning into a nil map would panic.
-    servicePorts := make(map[int32]bool)
-    ingressURLFormats := make(map[string]bool)
-    for _, item := range appSpec.DriverIngressOptions {
-        if item.ServicePort == nil {
-            return fmt.Errorf("DriverIngressOptions has nil ServicePort")
-        }
-        if servicePorts[*item.ServicePort] {
-            return fmt.Errorf("DriverIngressOptions has duplicate ServicePort: %d", *item.ServicePort)
-        }
-        servicePorts[*item.ServicePort] = true
-
-        if item.IngressURLFormat == "" {
-            return fmt.Errorf("DriverIngressOptions has empty IngressURLFormat")
-        }
-        if ingressURLFormats[item.IngressURLFormat] {
-            return fmt.Errorf("DriverIngressOptions has duplicate IngressURLFormat: %s", item.IngressURLFormat)
-        }
-
ingressURLFormats[item.IngressURLFormat] = true - } - - return nil -} - -// Validate that any Spark resources (driver/Service/Ingress) created for the application have been deleted. -func (c *Controller) validateSparkResourceDeletion(app *v1beta2.SparkApplication) bool { - driverPodName := app.Status.DriverInfo.PodName - // Derive the driver pod name in case the driver pod name was not recorded in the status, - // which could happen if the status update right after submission failed. - if driverPodName == "" { - driverPodName = getDriverPodName(app) - } - _, err := c.kubeClient.CoreV1().Pods(app.Namespace).Get(context.TODO(), driverPodName, metav1.GetOptions{}) - if err == nil || !errors.IsNotFound(err) { - return false - } - - sparkUIServiceName := app.Status.DriverInfo.WebUIServiceName - if sparkUIServiceName != "" { - _, err := c.kubeClient.CoreV1().Services(app.Namespace).Get(context.TODO(), sparkUIServiceName, metav1.GetOptions{}) - if err == nil || !errors.IsNotFound(err) { - return false - } - } - - sparkUIIngressName := app.Status.DriverInfo.WebUIIngressName - if sparkUIIngressName != "" { - _, err := c.kubeClient.NetworkingV1().Ingresses(app.Namespace).Get(context.TODO(), sparkUIIngressName, metav1.GetOptions{}) - if err == nil || !errors.IsNotFound(err) { - return false - } - } - - return true -} - -func (c *Controller) enqueue(obj interface{}) { - key, err := keyFunc(obj) - if err != nil { - glog.Errorf("failed to get key for %v: %v", obj, err) - return - } - - c.queue.AddRateLimited(key) -} - -func (c *Controller) recordSparkApplicationEvent(app *v1beta2.SparkApplication) { - switch app.Status.AppState.State { - case v1beta2.NewState: - c.recorder.Eventf( - app, - apiv1.EventTypeNormal, - "SparkApplicationAdded", - "SparkApplication %s was added, enqueuing it for submission", - app.Name) - case v1beta2.SubmittedState: - c.recorder.Eventf( - app, - apiv1.EventTypeNormal, - "SparkApplicationSubmitted", - "SparkApplication %s was submitted successfully", - app.Name) - case v1beta2.FailedSubmissionState: - c.recorder.Eventf( - app, - apiv1.EventTypeWarning, - "SparkApplicationSubmissionFailed", - "failed to submit SparkApplication %s: %s", - app.Name, - app.Status.AppState.ErrorMessage) - case v1beta2.CompletedState: - c.recorder.Eventf( - app, - apiv1.EventTypeNormal, - "SparkApplicationCompleted", - "SparkApplication %s completed", - app.Name) - case v1beta2.FailedState: - c.recorder.Eventf( - app, - apiv1.EventTypeWarning, - "SparkApplicationFailed", - "SparkApplication %s failed: %s", - app.Name, - app.Status.AppState.ErrorMessage) - case v1beta2.PendingRerunState: - c.recorder.Eventf( - app, - apiv1.EventTypeWarning, - "SparkApplicationPendingRerun", - "SparkApplication %s is pending rerun", - app.Name) - } -} - -func (c *Controller) recordDriverEvent(app *v1beta2.SparkApplication, phase v1beta2.DriverState, name string) { - switch phase { - case v1beta2.DriverCompletedState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverCompleted", "Driver %s completed", name) - case v1beta2.DriverPendingState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverPending", "Driver %s is pending", name) - case v1beta2.DriverRunningState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverRunning", "Driver %s is running", name) - case v1beta2.DriverFailedState: - c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkDriverFailed", "Driver %s failed", name) - case v1beta2.DriverUnknownState: - c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkDriverUnknownState", 
"Driver %s in unknown state", name) - } -} - -func (c *Controller) recordExecutorEvent(app *v1beta2.SparkApplication, state v1beta2.ExecutorState, args ...interface{}) { - switch state { - case v1beta2.ExecutorCompletedState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorCompleted", "Executor %s completed", args) - case v1beta2.ExecutorPendingState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorPending", "Executor %s is pending", args) - case v1beta2.ExecutorRunningState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorRunning", "Executor %s is running", args) - case v1beta2.ExecutorFailedState: - c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkExecutorFailed", "Executor %s failed with ExitCode: %d, Reason: %s", args) - case v1beta2.ExecutorUnknownState: - c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkExecutorUnknownState", "Executor %s in unknown state", args) - } -} - -func (c *Controller) clearStatus(status *v1beta2.SparkApplicationStatus) { - if status.AppState.State == v1beta2.InvalidatingState { - status.SparkApplicationID = "" - status.SubmissionAttempts = 0 - status.ExecutionAttempts = 0 - status.LastSubmissionAttemptTime = metav1.Time{} - status.TerminationTime = metav1.Time{} - status.AppState.ErrorMessage = "" - status.ExecutorState = nil - } else if status.AppState.State == v1beta2.PendingRerunState { - status.SparkApplicationID = "" - status.SubmissionAttempts = 0 - status.LastSubmissionAttemptTime = metav1.Time{} - status.DriverInfo = v1beta2.DriverInfo{} - status.AppState.ErrorMessage = "" - status.ExecutorState = nil - } -} - -func (c *Controller) hasApplicationExpired(app *v1beta2.SparkApplication) bool { - // The application has no TTL defined and will never expire. - if app.Spec.TimeToLiveSeconds == nil { - return false - } - - ttl := time.Duration(*app.Spec.TimeToLiveSeconds) * time.Second - now := time.Now() - if !app.Status.TerminationTime.IsZero() && now.Sub(app.Status.TerminationTime.Time) > ttl { - return true - } - - return false -} - -// Clean up when the spark application is terminated. -func (c *Controller) cleanUpOnTermination(oldApp, newApp *v1beta2.SparkApplication) error { - if needScheduling, scheduler := c.shouldDoBatchScheduling(newApp); needScheduling { - if err := scheduler.CleanupOnCompletion(newApp); err != nil { - return err - } - } - return nil -} - -func int64ptr(n int64) *int64 { - return &n -} diff --git a/pkg/controller/sparkapplication/controller_test.go b/pkg/controller/sparkapplication/controller_test.go deleted file mode 100644 index 44f9003db..000000000 --- a/pkg/controller/sparkapplication/controller_test.go +++ /dev/null @@ -1,1674 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "os" - "os/exec" - "strings" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - prometheus_model "github.com/prometheus/client_model/go" - "github.com/stretchr/testify/assert" - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/informers" - kubeclientfake "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -func newFakeController(app *v1beta2.SparkApplication, pods ...*apiv1.Pod) (*Controller, *record.FakeRecorder) { - crdclientfake.AddToScheme(scheme.Scheme) - crdClient := crdclientfake.NewSimpleClientset() - kubeClient := kubeclientfake.NewSimpleClientset() - util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} - informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 0*time.Second) - recorder := record.NewFakeRecorder(3) - - kubeClient.CoreV1().Nodes().Create(context.TODO(), &apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Status: apiv1.NodeStatus{ - Addresses: []apiv1.NodeAddress{ - { - Type: apiv1.NodeExternalIP, - Address: "12.34.56.78", - }, - }, - }, - }, metav1.CreateOptions{}) - - podInformerFactory := informers.NewSharedInformerFactory(kubeClient, 0*time.Second) - controller := newSparkApplicationController(crdClient, kubeClient, informerFactory, podInformerFactory, recorder, - &util.MetricConfig{}, "", "", nil, true) - - informer := informerFactory.Sparkoperator().V1beta2().SparkApplications().Informer() - if app != nil { - informer.GetIndexer().Add(app) - } - - podInformer := podInformerFactory.Core().V1().Pods().Informer() - for _, pod := range pods { - if pod != nil { - podInformer.GetIndexer().Add(pod) - } - } - return controller, recorder -} - -func TestOnAdd(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{}, - } - ctrl.onAdd(app) - - item, _ := ctrl.queue.Get() - defer ctrl.queue.Done(item) - key, ok := item.(string) - assert.True(t, ok) - expectedKey, _ := cache.MetaNamespaceKeyFunc(app) - assert.Equal(t, expectedKey, key) - ctrl.queue.Forget(item) -} - -func TestOnUpdate(t *testing.T) { - ctrl, recorder := newFakeController(nil) - - appTemplate := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - ResourceVersion: "1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Mode: v1beta2.ClusterMode, - Image: stringptr("foo-image:v1"), - Executor: v1beta2.ExecutorSpec{ - Instances: int32ptr(1), - }, - }, - } - - // Case1: Same Spec. - copyWithSameSpec := appTemplate.DeepCopy() - copyWithSameSpec.Status.ExecutionAttempts = 3 - copyWithSameSpec.ResourceVersion = "2" - - ctrl.onUpdate(appTemplate, copyWithSameSpec) - - // Verify that the SparkApplication was enqueued but no spec update events fired. 
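For readers unfamiliar with the pattern this verification exercises, here is a self-contained sketch of the informer-to-workqueue key round trip; it is an illustration under assumed setup, not code from this suite:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	// Producer side: event handlers enqueue objects by "namespace/name" key.
	obj := &metav1.ObjectMeta{Name: "foo", Namespace: "default"}
	key, _ := cache.MetaNamespaceKeyFunc(obj)
	queue.AddRateLimited(key)

	// Consumer side: a worker pops the key, processes it, then calls Done
	// (processing finished) and Forget (reset the per-item rate limiter).
	item, _ := queue.Get()
	fmt.Println(item) // "default/foo"
	queue.Forget(item)
	queue.Done(item)
}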
-    item, _ := ctrl.queue.Get()
-    key, ok := item.(string)
-    assert.True(t, ok)
-    expectedKey, _ := cache.MetaNamespaceKeyFunc(appTemplate)
-    assert.Equal(t, expectedKey, key)
-    ctrl.queue.Forget(item)
-    ctrl.queue.Done(item)
-    assert.Equal(t, 0, len(recorder.Events))
-
-    // Case2: Spec update failed.
-    copyWithSpecUpdate := appTemplate.DeepCopy()
-    copyWithSpecUpdate.Spec.Image = stringptr("foo-image:v2")
-    copyWithSpecUpdate.ResourceVersion = "2"
-
-    ctrl.onUpdate(appTemplate, copyWithSpecUpdate)
-
-    // Verify that the update failed because the SparkApplication does not exist.
-    assert.Equal(t, 1, len(recorder.Events))
-    event := <-recorder.Events
-    assert.True(t, strings.Contains(event, "SparkApplicationSpecUpdateFailed"))
-
-    // Case3: Spec update successful.
-    ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(appTemplate.Namespace).Create(context.TODO(), appTemplate, metav1.CreateOptions{})
-    ctrl.onUpdate(appTemplate, copyWithSpecUpdate)
-
-    // Verify the app was enqueued.
-    item, _ = ctrl.queue.Get()
-    key, ok = item.(string)
-    assert.True(t, ok)
-    expectedKey, _ = cache.MetaNamespaceKeyFunc(appTemplate)
-    assert.Equal(t, expectedKey, key)
-    ctrl.queue.Forget(item)
-    ctrl.queue.Done(item)
-    // Verify that the update succeeded.
-    assert.Equal(t, 1, len(recorder.Events))
-    event = <-recorder.Events
-    assert.True(t, strings.Contains(event, "SparkApplicationSpecUpdateProcessed"))
-
-    // Verify the SparkApplication state was updated to InvalidatingState.
-    app, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(appTemplate.Namespace).Get(context.TODO(), appTemplate.Name, metav1.GetOptions{})
-    assert.Nil(t, err)
-    assert.Equal(t, v1beta2.InvalidatingState, app.Status.AppState.State)
-}
-
-func TestOnDelete(t *testing.T) {
-    ctrl, recorder := newFakeController(nil)
-
-    app := &v1beta2.SparkApplication{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "foo",
-            Namespace: "default",
-        },
-        Status: v1beta2.SparkApplicationStatus{},
-    }
-    ctrl.onAdd(app)
-    ctrl.queue.Get()
-
-    ctrl.onDelete(app)
-    ctrl.queue.ShutDown()
-    item, _ := ctrl.queue.Get()
-    defer ctrl.queue.Done(item)
-    assert.True(t, item == nil)
-    event := <-recorder.Events
-    assert.True(t, strings.Contains(event, "SparkApplicationDeleted"))
-    ctrl.queue.Forget(item)
-}
-
-func TestHelperProcessFailure(t *testing.T) {
-    if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
-        return
-    }
-    os.Exit(2)
-}
-
-func TestHelperProcessSuccess(t *testing.T) {
-    if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
-        return
-    }
-    os.Exit(0)
-}
-
-func fetchCounterValue(m *prometheus.CounterVec, labels map[string]string) float64 {
-    pb := &prometheus_model.Metric{}
-    m.With(labels).Write(pb)
-
-    return pb.GetCounter().GetValue()
-}
-
-type metrics struct {
-    submitMetricCount  float64
-    runningMetricCount float64
-    successMetricCount float64
-    failedMetricCount  float64
-}
-
-type executorMetrics struct {
-    runningMetricCount float64
-    successMetricCount float64
-    failedMetricCount  float64
-}
-
-func TestSyncSparkApplication_SubmissionFailed(t *testing.T) {
-    os.Setenv(sparkHomeEnvVar, "/spark")
-    os.Setenv(kubernetesServiceHostEnvVar, "localhost")
-    os.Setenv(kubernetesServicePortEnvVar, "443")
-
-    restartPolicyOnFailure := v1beta2.RestartPolicy{
-        Type:                             v1beta2.OnFailure,
-        OnFailureRetries:                 int32ptr(1),
-        OnFailureRetryInterval:           int64ptr(100),
-        OnSubmissionFailureRetryInterval: int64ptr(100),
-        OnSubmissionFailureRetries:       int32ptr(1),
-    }
-    app := &v1beta2.SparkApplication{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "foo",
-            Namespace: "default",
-        },
-
Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.NewState, - ErrorMessage: "", - }, - }, - } - - ctrl, recorder := newFakeController(app) - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - - execCommand = func(command string, args ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcessFailure", "--", command} - cs = append(cs, args...) - cmd := exec.Command(os.Args[0], cs...) - cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} - return cmd - } - - // Attempt 1 - err = ctrl.syncSparkApplication("default/foo") - updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - - assert.Equal(t, v1beta2.FailedSubmissionState, updatedApp.Status.AppState.State) - assert.Equal(t, int32(1), updatedApp.Status.SubmissionAttempts) - assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppCount, map[string]string{})) - assert.Equal(t, float64(0), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) - assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppFailedSubmissionCount, map[string]string{})) - - event := <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationAdded")) - event = <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationSubmissionFailed")) - - // Attempt 2: Retry again. - updatedApp.Status.LastSubmissionAttemptTime = metav1.Time{Time: metav1.Now().Add(-100 * time.Second)} - ctrl, recorder = newFakeController(updatedApp) - _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), updatedApp, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - err = ctrl.syncSparkApplication("default/foo") - - // Verify that the application failed again. - updatedApp, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, v1beta2.FailedSubmissionState, updatedApp.Status.AppState.State) - assert.Equal(t, int32(2), updatedApp.Status.SubmissionAttempts) - assert.Equal(t, float64(0), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) - - event = <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationSubmissionFailed")) - - // Attempt 3: No more retries. - updatedApp.Status.LastSubmissionAttemptTime = metav1.Time{Time: metav1.Now().Add(-100 * time.Second)} - ctrl, recorder = newFakeController(updatedApp) - _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), updatedApp, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - err = ctrl.syncSparkApplication("default/foo") - - // Verify that the application failed again. - updatedApp, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, v1beta2.FailedState, updatedApp.Status.AppState.State) - // No more submission attempts made. 
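The execCommand override used in these attempts relies on the classic helper-process trick from the standard library's os/exec tests. A standalone sketch of the technique, with hypothetical names, independent of this suite:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// fakeExecCommand re-invokes the current test binary and asks it to run only
// the named helper "test" (which simply exits 0 or 2), so no real
// spark-submit binary is ever spawned.
func fakeExecCommand(helper string) func(string, ...string) *exec.Cmd {
	return func(command string, args ...string) *exec.Cmd {
		cs := append([]string{"-test.run=" + helper, "--", command}, args...)
		cmd := exec.Command(os.Args[0], cs...)
		// The helper only acts when this sentinel is set, so a normal
		// `go test` invocation skips it.
		cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
		return cmd
	}
}

func main() {
	execCommand := fakeExecCommand("TestHelperProcessSuccess")
	fmt.Println(execCommand("spark-submit", "--master", "k8s://example").Args)
}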
- assert.Equal(t, int32(2), updatedApp.Status.SubmissionAttempts) -} - -func TestValidateDetectsNodeSelectorSuccessNoSelector(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - } - - err := ctrl.validateSparkApplication(app) - assert.Nil(t, err) -} - -func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtAppLevel(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - } - - err := ctrl.validateSparkApplication(app) - assert.Nil(t, err) -} - -func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtPodLevel(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - }, - }, - } - - err := ctrl.validateSparkApplication(app) - assert.Nil(t, err) - - app.Spec.Executor = v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - } - - err = ctrl.validateSparkApplication(app) - assert.Nil(t, err) -} - -func TestValidateDetectsNodeSelectorFailsAppAndPodLevel(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - }, - }, - } - - err := ctrl.validateSparkApplication(app) - assert.NotNil(t, err) - - app.Spec.Executor = v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - } - - err = ctrl.validateSparkApplication(app) - assert.NotNil(t, err) -} - -func TestShouldRetry(t *testing.T) { - type testcase struct { - app *v1beta2.SparkApplication - shouldRetry bool - } - - testFn := func(test testcase, t *testing.T) { - shouldRetry := shouldRetry(test.app) - assert.Equal(t, test.shouldRetry, shouldRetry) - } - - restartPolicyAlways := v1beta2.RestartPolicy{ - Type: v1beta2.Always, - OnSubmissionFailureRetryInterval: int64ptr(100), - OnFailureRetryInterval: int64ptr(100), - } - - restartPolicyNever := v1beta2.RestartPolicy{ - Type: v1beta2.Never, - } - - restartPolicyOnFailure := v1beta2.RestartPolicy{ - Type: v1beta2.OnFailure, - OnFailureRetries: int32ptr(1), - OnFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetries: int32ptr(2), - } - - testcases := []testcase{ - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }}, - shouldRetry: false, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SucceedingState, - }, - }, - }, - shouldRetry: true, - }, - { - app: &v1beta2.SparkApplication{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SucceedingState, - }, - }, - }, - shouldRetry: false, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - }, - }, - shouldRetry: true, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - }, - }, - shouldRetry: false, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - }, - }, - shouldRetry: false, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - }, - }, - shouldRetry: true, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.PendingRerunState, - }, - }, - }, - shouldRetry: false, - }, - } - - for _, test := range testcases { - testFn(test, t) - } -} - -func TestSyncSparkApplication_SubmissionSuccess(t *testing.T) { - type testcase struct { - app *v1beta2.SparkApplication - expectedState v1beta2.ApplicationStateType - } - os.Setenv(sparkHomeEnvVar, "/spark") - os.Setenv(kubernetesServiceHostEnvVar, "localhost") - os.Setenv(kubernetesServicePortEnvVar, "443") - - testFn := func(test testcase, t *testing.T) { - ctrl, _ := newFakeController(test.app) - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(test.app.Namespace).Create(context.TODO(), test.app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - - execCommand = func(command string, args ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcessSuccess", "--", command} - cs = append(cs, args...) - cmd := exec.Command(os.Args[0], cs...) 
- cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} - return cmd - } - - err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", test.app.Namespace, test.app.Name)) - assert.Nil(t, err) - updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(test.app.Namespace).Get(context.TODO(), test.app.Name, metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, test.expectedState, updatedApp.Status.AppState.State) - if test.app.Status.AppState.State == v1beta2.NewState { - assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppCount, map[string]string{})) - } - if test.expectedState == v1beta2.SubmittedState { - assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) - } - } - - restartPolicyAlways := v1beta2.RestartPolicy{ - Type: v1beta2.Always, - OnSubmissionFailureRetryInterval: int64ptr(100), - OnFailureRetryInterval: int64ptr(100), - } - - restartPolicyNever := v1beta2.RestartPolicy{ - Type: v1beta2.Never, - } - - restartPolicyOnFailure := v1beta2.RestartPolicy{ - Type: v1beta2.OnFailure, - OnFailureRetries: int32ptr(1), - OnFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetries: int32ptr(2), - } - - testcases := []testcase{ - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }}, - expectedState: v1beta2.SubmittedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SucceedingState, - }, - }, - }, - expectedState: v1beta2.PendingRerunState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.PendingRerunState, - }, - }, - }, - expectedState: v1beta2.SubmittedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - }, - expectedState: v1beta2.FailedSubmissionState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - SubmissionAttempts: 1, - LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - }, - expectedState: v1beta2.SubmittedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - ExecutionAttempts: 1, - TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * 
time.Second)}, - }, - }, - expectedState: v1beta2.PendingRerunState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - }, - expectedState: v1beta2.FailingState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.InvalidatingState, - }, - TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - }, - expectedState: v1beta2.PendingRerunState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SucceedingState, - }, - }, - }, - expectedState: v1beta2.CompletedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.NewState, - }, - }, - }, - expectedState: v1beta2.SubmittedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - ExecutionAttempts: 2, - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.FailedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - ExecutionAttempts: 1, - TerminationTime: metav1.Now(), - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.FailingState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - ExecutionAttempts: 1, - TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.PendingRerunState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - SubmissionAttempts: 3, - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.FailedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ 
- AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - SubmissionAttempts: 1, - LastSubmissionAttemptTime: metav1.Now(), - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.FailedSubmissionState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - SubmissionAttempts: 1, - LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.SubmittedState, - }, - } - - for _, test := range testcases { - testFn(test, t) - } -} - -func TestSyncSparkApplication_ExecutingState(t *testing.T) { - type testcase struct { - appName string - oldAppStatus v1beta2.ApplicationStateType - oldExecutorStatus map[string]v1beta2.ExecutorState - driverPod *apiv1.Pod - executorPod *apiv1.Pod - expectedAppState v1beta2.ApplicationStateType - expectedExecutorState map[string]v1beta2.ExecutorState - expectedAppMetrics metrics - expectedExecutorMetrics executorMetrics - } - - os.Setenv(kubernetesServiceHostEnvVar, "localhost") - os.Setenv(kubernetesServicePortEnvVar, "443") - - appName := "foo" - driverPodName := appName + "-driver" - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: "test", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: v1beta2.RestartPolicy{ - Type: v1beta2.Never, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SubmittedState, - ErrorMessage: "", - }, - DriverInfo: v1beta2.DriverInfo{ - PodName: driverPodName, - }, - ExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - }, - } - - testcases := []testcase{ - { - appName: appName, - oldAppStatus: v1beta2.SubmittedState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - expectedAppState: v1beta2.FailingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, - expectedAppMetrics: metrics{ - failedMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - failedMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.SubmittedState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.RunningState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - runningMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - 
oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Running: &apiv1.ContainerStateRunning{}, - }, - }, - { - Name: "sidecar", - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.RunningState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, - { - Name: "sidecar", - State: apiv1.ContainerState{ - Running: &apiv1.ContainerStateRunning{}, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.SucceedingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - successMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 137, - Reason: "OOMKilled", - }, - }, - }, - { - Name: "sidecar", - State: apiv1.ContainerState{ - Running: &apiv1.ContainerStateRunning{}, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.FailingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - failedMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodFailed, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 137, - Reason: "OOMKilled", - }, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodFailed, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkExecutorContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 137, - Reason: "OOMKilled", - }, - }, - }, - }, - }, - }, - expectedAppState: v1beta2.FailingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, - expectedAppMetrics: metrics{ - failedMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - failedMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodFailed, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, - { - Name: "sidecar", - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 137, - Reason: "OOMKilled", - }, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.SucceedingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - successMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - 
oldAppStatus: v1beta2.FailingState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, - expectedAppState: v1beta2.FailedState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{}, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.SucceedingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - successMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.SucceedingState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppState: v1beta2.CompletedState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{}, - }, - { - appName: appName, - oldAppStatus: v1beta2.SubmittedState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodUnknown, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodPending, - }, - }, - expectedAppState: v1beta2.UnknownState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorPendingState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{}, - }, - { - appName: appName, - oldAppStatus: v1beta2.CompletedState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorPendingState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.CompletedState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - 
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - }, - expectedAppState: v1beta2.RunningState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorUnknownState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{}, - }, - } - - testFn := func(test testcase, t *testing.T) { - app.Status.AppState.State = test.oldAppStatus - app.Status.ExecutorState = test.oldExecutorStatus - app.Name = test.appName - app.Status.ExecutionAttempts = 1 - ctrl, _ := newFakeController(app, test.driverPod, test.executorPod) - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - if test.driverPod != nil { - ctrl.kubeClient.CoreV1().Pods(app.Namespace).Create(context.TODO(), test.driverPod, metav1.CreateOptions{}) - } - if test.executorPod != nil { - ctrl.kubeClient.CoreV1().Pods(app.Namespace).Create(context.TODO(), test.executorPod, metav1.CreateOptions{}) - } - - err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name)) - assert.Nil(t, err) - // Verify application and executor states. - updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - assert.Equal(t, test.expectedAppState, updatedApp.Status.AppState.State) - assert.Equal(t, test.expectedExecutorState, updatedApp.Status.ExecutorState) - - // Validate error message if the driver pod failed. - if test.driverPod != nil && test.driverPod.Status.Phase == apiv1.PodFailed { - if len(test.driverPod.Status.ContainerStatuses) > 0 && test.driverPod.Status.ContainerStatuses[0].State.Terminated != nil { - if test.driverPod.Status.ContainerStatuses[0].State.Terminated.ExitCode != 0 { - assert.Equal(t, updatedApp.Status.AppState.ErrorMessage, - fmt.Sprintf("driver container failed with ExitCode: %d, Reason: %s", test.driverPod.Status.ContainerStatuses[0].State.Terminated.ExitCode, test.driverPod.Status.ContainerStatuses[0].State.Terminated.Reason)) - } - } else { - assert.Equal(t, updatedApp.Status.AppState.ErrorMessage, "driver container status missing") - } - } - - // Verify application metrics. - assert.Equal(t, test.expectedAppMetrics.runningMetricCount, ctrl.metrics.sparkAppRunningCount.Value(map[string]string{})) - assert.Equal(t, test.expectedAppMetrics.successMetricCount, fetchCounterValue(ctrl.metrics.sparkAppSuccessCount, map[string]string{})) - assert.Equal(t, test.expectedAppMetrics.submitMetricCount, fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) - assert.Equal(t, test.expectedAppMetrics.failedMetricCount, fetchCounterValue(ctrl.metrics.sparkAppFailureCount, map[string]string{})) - - // Verify executor metrics. 
-        assert.Equal(t, test.expectedExecutorMetrics.runningMetricCount, ctrl.metrics.sparkAppExecutorRunningCount.Value(map[string]string{}))
-        assert.Equal(t, test.expectedExecutorMetrics.successMetricCount, fetchCounterValue(ctrl.metrics.sparkAppExecutorSuccessCount, map[string]string{}))
-        assert.Equal(t, test.expectedExecutorMetrics.failedMetricCount, fetchCounterValue(ctrl.metrics.sparkAppExecutorFailureCount, map[string]string{}))
-    }
-
-    for _, test := range testcases {
-        testFn(test, t)
-    }
-}
-
-func TestSyncSparkApplication_ApplicationExpired(t *testing.T) {
-    os.Setenv(kubernetesServiceHostEnvVar, "localhost")
-    os.Setenv(kubernetesServicePortEnvVar, "443")
-
-    appName := "foo"
-    driverPodName := appName + "-driver"
-
-    now := time.Now()
-    terminationTime := now.Add(-2 * time.Second)
-    app := &v1beta2.SparkApplication{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      appName,
-            Namespace: "test",
-        },
-        Spec: v1beta2.SparkApplicationSpec{
-            RestartPolicy: v1beta2.RestartPolicy{
-                Type: v1beta2.Never,
-            },
-            TimeToLiveSeconds: int64ptr(1),
-        },
-        Status: v1beta2.SparkApplicationStatus{
-            AppState: v1beta2.ApplicationState{
-                State:        v1beta2.CompletedState,
-                ErrorMessage: "",
-            },
-            DriverInfo: v1beta2.DriverInfo{
-                PodName: driverPodName,
-            },
-            TerminationTime: metav1.Time{
-                Time: terminationTime,
-            },
-            ExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState},
-        },
-    }
-
-    ctrl, _ := newFakeController(app)
-    _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name))
-    assert.Nil(t, err)
-
-    _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
-    assert.True(t, errors.IsNotFound(err))
-}
-
-func TestIsNextRetryDue(t *testing.T) {
-    // Cases where no retry is due: missing interval, no attempts yet, or a zero last-event time.
-    assert.False(t, isNextRetryDue(nil, 3, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)}))
-    assert.False(t, isNextRetryDue(int64ptr(5), 0, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)}))
-    assert.False(t, isNextRetryDue(int64ptr(5), 3, metav1.Time{}))
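The two assertions that follow pin down a linear back-off rule: with a 50-second interval and 3 prior attempts, a retry becomes due only after 150 seconds. A sketch of a predicate consistent with these cases (assumed semantics, not the operator's source):

package main

import (
	"fmt"
	"time"
)

// nextRetryDue sketches the linear back-off rule exercised below: a retry
// becomes due once interval*attempts seconds have elapsed since the last
// submission or failure event.
func nextRetryDue(intervalSeconds int64, attempts int32, last time.Time) bool {
	if intervalSeconds <= 0 || attempts <= 0 || last.IsZero() {
		return false
	}
	wait := time.Duration(intervalSeconds) * time.Second * time.Duration(attempts)
	return time.Now().After(last.Add(wait))
}

func main() {
	// With interval=50s and attempts=3 the threshold is 150s:
	fmt.Println(nextRetryDue(50, 3, time.Now().Add(-100*time.Second))) // false
	fmt.Println(nextRetryDue(50, 3, time.Now().Add(-151*time.Second))) // true
}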
-    // Not enough time has passed since the last attempt, so no retry is due yet.
-    assert.False(t, isNextRetryDue(int64ptr(50), 3, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)}))
-    assert.True(t, isNextRetryDue(int64ptr(50), 3, metav1.Time{Time: metav1.Now().Add(-151 * time.Second)}))
-}
-
-func TestIngressWithSubpathAffectsSparkConfiguration(t *testing.T) {
-    os.Setenv(kubernetesServiceHostEnvVar, "localhost")
-    os.Setenv(kubernetesServicePortEnvVar, "443")
-
-    appName := "ingressaffectssparkconfig"
-
-    app := &v1beta2.SparkApplication{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      appName,
-            Namespace: "test",
-        },
-        Spec: v1beta2.SparkApplicationSpec{
-            RestartPolicy: v1beta2.RestartPolicy{
-                Type: v1beta2.Never,
-            },
-            TimeToLiveSeconds: int64ptr(1),
-        },
-        Status: v1beta2.SparkApplicationStatus{},
-    }
-
-    ctrl, _ := newFakeController(app)
-    ctrl.ingressURLFormat = "example.com/{{$appNamespace}}/{{$appName}}"
-    ctrl.enableUIService = true
-    _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name))
-    assert.Nil(t, err)
-    deployedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    ingresses, err := ctrl.kubeClient.NetworkingV1().Ingresses(app.Namespace).List(context.TODO(), metav1.ListOptions{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    if ingresses == nil || ingresses.Items == nil || len(ingresses.Items) != 1 {
-        t.Fatal("The ingress does not exist, has no items, or has the wrong number of items")
-    }
-    if ingresses.Items[0].Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Path != "/"+app.Namespace+"/"+app.Name+"(/|$)(.*)" {
-        t.Fatal("The ingress subpath was not created successfully.")
-    }
-    // The controller doesn't sync changes to the sparkConf performed by submitSparkApplication back to the kubernetes API server.
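As context for the checks that follow, a small illustration of the Spark conf the ingress subpath is expected to produce; the values mirror the assertions below:

package main

import "fmt"

func main() {
	// For an ingress format of "example.com/{{$appNamespace}}/{{$appName}}",
	// the driver UI is served behind a subpath, so the submission step is
	// expected to set the reverse-proxy conf keys accordingly.
	namespace, name := "test", "ingressaffectssparkconfig"
	expected := map[string]string{
		"spark.ui.proxyBase":        "/" + namespace + "/" + name,
		"spark.ui.proxyRedirectUri": "/",
	}
	fmt.Println(expected)
}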
-    if deployedApp.Spec.SparkConf["spark.ui.proxyBase"] != "/"+app.Namespace+"/"+app.Name {
-        t.Log("The spark configuration does not reflect the subpath expected by the ingress")
-    }
-    if deployedApp.Spec.SparkConf["spark.ui.proxyRedirectUri"] != "/" {
-        t.Log("The spark configuration does not reflect the proxyRedirectUri expected by the ingress")
-    }
-}
-
-func TestIngressWithClassName(t *testing.T) {
-    os.Setenv(kubernetesServiceHostEnvVar, "localhost")
-    os.Setenv(kubernetesServicePortEnvVar, "443")
-
-    appName := "ingressaffectssparkconfig"
-
-    app := &v1beta2.SparkApplication{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      appName,
-            Namespace: "test",
-        },
-        Spec: v1beta2.SparkApplicationSpec{
-            RestartPolicy: v1beta2.RestartPolicy{
-                Type: v1beta2.Never,
-            },
-            TimeToLiveSeconds: int64ptr(1),
-        },
-        Status: v1beta2.SparkApplicationStatus{},
-    }
-
-    ctrl, _ := newFakeController(app)
-    ctrl.ingressURLFormat = "{{$appNamespace}}.{{$appName}}.example.com"
-    ctrl.ingressClassName = "nginx"
-    ctrl.enableUIService = true
-    _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name))
-    assert.Nil(t, err)
-    _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    ingresses, err := ctrl.kubeClient.NetworkingV1().Ingresses(app.Namespace).List(context.TODO(), metav1.ListOptions{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    if ingresses == nil || ingresses.Items == nil || len(ingresses.Items) != 1 {
-        t.Fatal("The ingress does not exist, has no items, or has the wrong number of items")
-    }
-    if ingresses.Items[0].Spec.IngressClassName == nil || *ingresses.Items[0].Spec.IngressClassName != "nginx" {
-        t.Fatal("The ingressClassName does not exist, or the wrong value is set")
-    }
-}
-
-func stringptr(s string) *string {
-    return &s
-}
-
-func int32ptr(n int32) *int32 {
-    return &n
-}
diff --git a/pkg/controller/sparkapplication/driveringress_test.go b/pkg/controller/sparkapplication/driveringress_test.go
deleted file mode 100644
index ef1dedc67..000000000
--- a/pkg/controller/sparkapplication/driveringress_test.go
+++ /dev/null
@@ -1,730 +0,0 @@
-/*
-Copyright 2024 spark-operator contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "reflect" - "testing" - - apiv1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes/fake" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -func TestCreateDriverIngressService(t *testing.T) { - type testcase struct { - name string - app *v1beta2.SparkApplication - expectedServices []SparkService - expectedSelector map[string]string - expectError bool - } - testFn := func(test testcase, t *testing.T) { - fakeClient := fake.NewSimpleClientset() - util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} - if len(test.expectedServices) != len(test.app.Spec.DriverIngressOptions) { - t.Errorf("%s: size of test.expectedServices (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", - test.name, len(test.expectedServices), len(test.app.Spec.DriverIngressOptions), test.app.Name) - } - for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { - sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - expectedService := test.expectedServices[i] - if sparkService.serviceName != expectedService.serviceName { - t.Errorf("%s: for service name wanted %s got %s", test.name, expectedService.serviceName, sparkService.serviceName) - } - service, err := fakeClient.CoreV1(). - Services(test.app.Namespace). - Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - if service.Labels[config.SparkAppNameLabel] != test.app.Name { - t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) - } - if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { - t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) - } - if service.Spec.Type != expectedService.serviceType { - t.Errorf("%s: for service type wanted %s got %s", test.name, expectedService.serviceType, service.Spec.Type) - } - if len(service.Spec.Ports) != 1 { - t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) - } - port := service.Spec.Ports[0] - if port.Port != expectedService.servicePort { - t.Errorf("%s: unexpected port wanted %d got %d", test.name, expectedService.servicePort, port.Port) - } - if port.Name != expectedService.servicePortName { - t.Errorf("%s: unexpected port name wanted %s got %s", test.name, expectedService.servicePortName, port.Name) - } - serviceAnnotations := service.ObjectMeta.Annotations - if !reflect.DeepEqual(serviceAnnotations, expectedService.serviceAnnotations) { - t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, expectedService.serviceAnnotations, serviceAnnotations) - } - serviceLabels := service.ObjectMeta.Labels - if !reflect.DeepEqual(serviceLabels, expectedService.serviceLabels) { - t.Errorf("%s: unexpected labels wanted %s got %s", test.name, expectedService.serviceLabels, serviceLabels) - } - } - } - serviceNameFormat := "%s-driver-%d" - portNameFormat := "driver-ing-%d" - app1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo1", - Namespace: "default", - UID: 
"foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - ExecutionAttempts: 1, - }, - } - app2 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo2", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-2", - ExecutionAttempts: 2, - }, - } - app3 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo3", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: nil, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-3", - }, - } - var appPort int32 = 80 - app4 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo4", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - }, - }, - SparkConf: map[string]string{ - sparkUIPortConfigurationKey: "4041", - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-3", - }, - } - var serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort - app5 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo5", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - ServiceType: &serviceTypeNodePort, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-2", - ExecutionAttempts: 2, - }, - } - appPortName := "http-spark-test" - app6 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo6", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - ServicePortName: &appPortName, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-6", - }, - } - app7 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo7", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - ServiceAnnotations: map[string]string{ - "key": "value", - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-7", - ExecutionAttempts: 1, - }, - } - app8 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo8", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - ServiceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo8", - "key": "value", - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-8", - ExecutionAttempts: 1, - }, - } - testcases := []testcase{ - { - name: "service with custom serviceport and serviceport and target port are same", - app: app1, - expectedServices: []SparkService{ - { - serviceName: 
fmt.Sprintf(serviceNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app1.Spec.DriverIngressOptions[0].ServicePort), - servicePort: *app1.Spec.DriverIngressOptions[0].ServicePort, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo1", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(*app1.Spec.DriverIngressOptions[0].ServicePort), - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo1", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with default port", - app: app2, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app2.Spec.DriverIngressOptions[0].ServicePort), - servicePort: int32(*app2.Spec.DriverIngressOptions[0].ServicePort), - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo2", - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo2", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom serviceport and serviceport and target port are different", - app: app4, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app4.Spec.DriverIngressOptions[0].ServicePort), - servicePort: 80, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo4", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo4", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom servicetype", - app: app5, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app5.GetName(), *app5.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeNodePort, - servicePortName: fmt.Sprintf(portNameFormat, *app5.Spec.DriverIngressOptions[0].ServicePort), - servicePort: *app5.Spec.DriverIngressOptions[0].ServicePort, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo5", - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo5", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom serviceportname", - app: app6, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app6.GetName(), *app6.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: "http-spark-test", - servicePort: int32(80), - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo6", - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo6", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with annotation", - app: app7, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app7.GetName(), *app7.Spec.DriverIngressOptions[0].ServicePort), - serviceType: 
apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app7.Spec.DriverIngressOptions[0].ServicePort), - servicePort: *app7.Spec.DriverIngressOptions[0].ServicePort, - serviceAnnotations: map[string]string{ - "key": "value", - }, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo7", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo7", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom labels", - app: app8, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app8.GetName(), *app8.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app8.Spec.DriverIngressOptions[0].ServicePort), - servicePort: *app8.Spec.DriverIngressOptions[0].ServicePort, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo8", - "key": "value", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo8", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with bad port configurations", - app: app3, - expectError: true, - expectedServices: []SparkService{{}}, - }, - } - for _, test := range testcases { - testFn(test, t) - } -} - -func TestCreateDriverIngress(t *testing.T) { - type testcase struct { - name string - app *v1beta2.SparkApplication - expectedIngresses []SparkIngress - expectError bool - } - - testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { - fakeClient := fake.NewSimpleClientset() - if len(test.expectedIngresses) != len(test.app.Spec.DriverIngressOptions) { - t.Errorf("%s: size of test.expectedIngresses (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", - test.name, len(test.expectedIngresses), len(test.app.Spec.DriverIngressOptions), test.app.Name) - } - for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { - sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) - if err != nil { - t.Fatal(err) - } - ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) - if err != nil { - t.Fatal(err) - } - sparkIngress, err := createDriverIngress(test.app, &driverIngressConfiguration, *sparkService, ingressURL, ingressClassName, fakeClient) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - expectedIngress := test.expectedIngresses[i] - if sparkIngress.ingressName != expectedIngress.ingressName { - t.Errorf("Ingress name wanted %s got %s", expectedIngress.ingressName, sparkIngress.ingressName) - } - if sparkIngress.ingressURL.String() != expectedIngress.ingressURL.String() { - t.Errorf("Ingress URL wanted %s got %s", expectedIngress.ingressURL, sparkIngress.ingressURL) - } - ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). 
- Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - if len(ingress.Annotations) != 0 { - for key, value := range ingress.Annotations { - if expectedIngress.annotations[key] != ingress.Annotations[key] { - t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) - } - } - } - if len(ingress.Spec.TLS) != 0 { - for _, ingressTls := range ingress.Spec.TLS { - if ingressTls.Hosts[0] != expectedIngress.ingressTLS[0].Hosts[0] { - t.Errorf("Expected ingressTls host: %s but found : %s", expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) - } - if ingressTls.SecretName != expectedIngress.ingressTLS[0].SecretName { - t.Errorf("Expected ingressTls secretName: %s but found : %s", expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) - } - } - } - if ingress.Labels[config.SparkAppNameLabel] != test.app.Name { - t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) - } - - if len(ingress.Spec.Rules) != 1 { - t.Errorf("No Ingress rules found.") - } - ingressRule := ingress.Spec.Rules[0] - // If we have a path, then the ingress adds capture groups - if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { - expectedIngress.ingressURL.Path = expectedIngress.ingressURL.Path + "(/|$)(.*)" - } - if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path { - t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path) - } - - if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { - t.Errorf("No Ingress paths found.") - } - ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] - if ingressPath.Backend.Service.Name != sparkService.serviceName { - t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) - } - if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { - t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) - } - if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { - t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) - } - } - } - - ingressNameFormat := "%s-ing-%d" - var appPort int32 = 80 - app1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app2 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app3 := &v1beta2.SparkApplication{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - IngressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app4 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - }, - IngressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: ""}, - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - - testcases := []testcase{ - { - name: "simple ingress object", - app: app1, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), - }, - }, - expectError: false, - }, - { - name: "ingress with annotations and without tls configuration", - app: app2, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - }, - }, - expectError: false, - }, - { - name: "ingress with annotations and tls configuration", - app: app3, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app3.GetName(), *app3.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - ingressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, - }, - }, - }, - expectError: false, - }, - { - name: "ingress with incomplete list of annotations", - app: app4, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - ingressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: ""}, - }, - }, - }, - expectError: true, - }, - } - - for _, test := range testcases { - testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") - } - - testcases = []testcase{ - { - name: "simple ingress object with ingress URL Format with path", - app: app1, - expectedIngresses: 
[]SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), - annotations: map[string]string{ - "nginx.ingress.kubernetes.io/rewrite-target": "/$2", - }, - }, - }, - expectError: false, - }, - } - - for _, test := range testcases { - testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") - } - - testcases = []testcase{ - { - name: "simple ingress object with ingressClassName set", - app: app1, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), - ingressClassName: "nginx", - }, - }, - expectError: false, - }, - } - for _, test := range testcases { - testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") - } -} diff --git a/pkg/controller/sparkapplication/monitoring_config_test.go b/pkg/controller/sparkapplication/monitoring_config_test.go deleted file mode 100644 index 3eb20b8f9..000000000 --- a/pkg/controller/sparkapplication/monitoring_config_test.go +++ /dev/null @@ -1,267 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -func TestConfigPrometheusMonitoring(t *testing.T) { - type testcase struct { - app *v1beta2.SparkApplication - metricsProperties string - metricsPropertiesFile string - prometheusConfig string - port string - driverJavaOptions string - executorJavaOptions string - } - - fakeClient := fake.NewSimpleClientset() - testFn := func(test testcase, t *testing.T) { - err := configPrometheusMonitoring(test.app, fakeClient) - if err != nil { - t.Errorf("failed to configure Prometheus monitoring: %v", err) - } - - configMapName := config.GetPrometheusConfigMapName(test.app) - configMap, err := fakeClient.CoreV1().ConfigMaps(test.app.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) - if err != nil { - t.Errorf("failed to get ConfigMap %s: %v", configMapName, err) - } - - if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && - test.app.Spec.Monitoring.MetricsPropertiesFile == nil && - len(configMap.Data) != 2 { - t.Errorf("expected %d data items got %d", 2, len(configMap.Data)) - } - - if test.app.Spec.Monitoring.Prometheus.ConfigFile != nil && - test.app.Spec.Monitoring.MetricsPropertiesFile == nil && - len(configMap.Data) != 1 { - t.Errorf("expected %d data items got %d", 1, len(configMap.Data)) - } - - if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && - test.app.Spec.Monitoring.MetricsPropertiesFile != nil && - len(configMap.Data) != 1 { - t.Errorf("expected %d data items got %d", 1, len(configMap.Data)) - } - - if test.app.Spec.Monitoring.MetricsPropertiesFile == nil && configMap.Data[metricsPropertiesKey] != test.metricsProperties { - t.Errorf("metrics.properties expected %s got %s", test.metricsProperties, configMap.Data[metricsPropertiesKey]) - } - - if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && configMap.Data[prometheusConfigKey] != test.prometheusConfig { - t.Errorf("prometheus.yaml expected %s got %s", test.prometheusConfig, configMap.Data[prometheusConfigKey]) - } - - if test.app.Spec.Monitoring.ExposeDriverMetrics { - if len(test.app.Spec.Driver.Annotations) != 3 { - t.Errorf("expected %d driver annotations got %d", 3, len(test.app.Spec.Driver.Annotations)) - } - if test.app.Spec.Driver.Annotations[prometheusPortAnnotation] != test.port { - t.Errorf("java agent port expected %s got %s", test.port, test.app.Spec.Driver.Annotations[prometheusPortAnnotation]) - } - - if *test.app.Spec.Driver.JavaOptions != test.driverJavaOptions { - t.Errorf("driver Java options expected %s got %s", test.driverJavaOptions, *test.app.Spec.Driver.JavaOptions) - } - } - - if test.app.Spec.Monitoring.ExposeExecutorMetrics { - if len(test.app.Spec.Executor.Annotations) != 3 { - t.Errorf("expected %d executor annotations got %d", 3, len(test.app.Spec.Executor.Annotations)) - } - if test.app.Spec.Executor.Annotations[prometheusPortAnnotation] != test.port { - t.Errorf("java agent port expected %s got %s", test.port, test.app.Spec.Executor.Annotations[prometheusPortAnnotation]) - } - - if *test.app.Spec.Executor.JavaOptions !=
test.executorJavaOptions { - t.Errorf("executor Java options expected %s got %s", test.executorJavaOptions, *test.app.Spec.Executor.JavaOptions) - } - } - - if test.app.Spec.Monitoring.MetricsPropertiesFile != nil { - if test.app.Spec.SparkConf["spark.metrics.conf"] != test.metricsPropertiesFile { - t.Errorf("expected sparkConf %s got %s", test.metricsPropertiesFile, test.app.Spec.SparkConf["spark.metrics.conf"]) - } - } - } - - testcases := []testcase{ - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app1", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - }, - }, - }, - }, - metricsProperties: config.DefaultMetricsProperties, - prometheusConfig: config.DefaultPrometheusConfiguration, - port: fmt.Sprintf("%d", config.DefaultPrometheusJavaAgentPort), - driverJavaOptions: "-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", - executorJavaOptions: "-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app2", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Executor: v1beta2.ExecutorSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - MetricsProperties: stringptr("testcase2dummy"), - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - Port: int32ptr(8091), - Configuration: stringptr("testcase2dummy"), - }, - }, - }, - }, - metricsProperties: "testcase2dummy", - prometheusConfig: "testcase2dummy", - port: "8091", - driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", - executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app2", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Executor: v1beta2.ExecutorSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - MetricsProperties: stringptr("testcase3dummy"), - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - Port: int32ptr(8091), - ConfigFile: stringptr("testcase3dummy.yaml"), - }, - }, - }, - }, - metricsProperties: "testcase3dummy", - port: "8091", - driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase3dummy.yaml", - executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase3dummy.yaml", - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app2", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - },
- Executor: v1beta2.ExecutorSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - MetricsPropertiesFile: stringptr("/testcase4dummy/metrics.properties"), - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - Port: int32ptr(8091), - ConfigFile: stringptr("testcase4dummy.yaml"), - }, - }, - }, - }, - metricsPropertiesFile: "/testcase4dummy/metrics.properties", - port: "8091", - driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase4dummy.yaml", - executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase4dummy.yaml", - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app2", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Executor: v1beta2.ExecutorSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - MetricsPropertiesFile: stringptr("/testcase5dummy/metrics.properties"), - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - Port: int32ptr(8091), - }, - }, - }, - }, - metricsPropertiesFile: "/testcase5dummy/metrics.properties", - prometheusConfig: config.DefaultPrometheusConfiguration, - port: "8091", - driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", - executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", - }, - } - - for _, test := range testcases { - testFn(test, t) - } -} diff --git a/pkg/controller/sparkapplication/spark_pod_eventhandler.go b/pkg/controller/sparkapplication/spark_pod_eventhandler.go deleted file mode 100644 index 8ebb398cb..000000000 --- a/pkg/controller/sparkapplication/spark_pod_eventhandler.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "github.com/golang/glog" - - apiv1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/cache" - - crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -// sparkPodEventHandler monitors Spark executor pods and updates the SparkApplication objects accordingly. -type sparkPodEventHandler struct { - applicationLister crdlisters.SparkApplicationLister - // callback function to enqueue SparkApp key for processing. - enqueueFunc func(appKey interface{}) -} - -// newSparkPodEventHandler creates a new sparkPodEventHandler instance.
-func newSparkPodEventHandler(enqueueFunc func(appKey interface{}), lister crdlisters.SparkApplicationLister) *sparkPodEventHandler { - monitor := &sparkPodEventHandler{ - enqueueFunc: enqueueFunc, - applicationLister: lister, - } - return monitor -} - -func (s *sparkPodEventHandler) onPodAdded(obj interface{}) { - pod := obj.(*apiv1.Pod) - glog.V(2).Infof("Pod %s added in namespace %s.", pod.GetName(), pod.GetNamespace()) - s.enqueueSparkAppForUpdate(pod) -} - -func (s *sparkPodEventHandler) onPodUpdated(old, updated interface{}) { - oldPod := old.(*apiv1.Pod) - updatedPod := updated.(*apiv1.Pod) - - if updatedPod.ResourceVersion == oldPod.ResourceVersion { - return - } - glog.V(2).Infof("Pod %s updated in namespace %s.", updatedPod.GetName(), updatedPod.GetNamespace()) - s.enqueueSparkAppForUpdate(updatedPod) - -} - -func (s *sparkPodEventHandler) onPodDeleted(obj interface{}) { - var deletedPod *apiv1.Pod - - switch obj.(type) { - case *apiv1.Pod: - deletedPod = obj.(*apiv1.Pod) - case cache.DeletedFinalStateUnknown: - deletedObj := obj.(cache.DeletedFinalStateUnknown).Obj - deletedPod = deletedObj.(*apiv1.Pod) - } - - if deletedPod == nil { - return - } - glog.V(2).Infof("Pod %s deleted in namespace %s.", deletedPod.GetName(), deletedPod.GetNamespace()) - s.enqueueSparkAppForUpdate(deletedPod) -} - -func (s *sparkPodEventHandler) enqueueSparkAppForUpdate(pod *apiv1.Pod) { - appName, exists := getAppName(pod) - if !exists { - return - } - - if submissionID, exists := pod.Labels[config.SubmissionIDLabel]; exists { - app, err := s.applicationLister.SparkApplications(pod.GetNamespace()).Get(appName) - if err != nil || app.Status.SubmissionID != submissionID { - return - } - } - - appKey := createMetaNamespaceKey(pod.GetNamespace(), appName) - glog.V(2).Infof("Enqueuing SparkApplication %s for app update processing.", appKey) - s.enqueueFunc(appKey) -} diff --git a/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go b/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go deleted file mode 100644 index 2fa836022..000000000 --- a/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go +++ /dev/null @@ -1,288 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - - "github.com/kubeflow/spark-operator/pkg/config" -) - -func TestOnPodAdded(t *testing.T) { - monitor, queue := newMonitor() - - appName := "foo-1" - namespace := "foo-namespace" - driverPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodPending, - }, - } - go monitor.onPodAdded(driverPod) - - key, _ := queue.Get() - actualNamespace, actualAppName, err := cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) - - appName = "foo-2" - executorPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: "foo-namespace", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - sparkExecutorIDLabel: "1", - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - } - go monitor.onPodAdded(executorPod) - - key, _ = queue.Get() - - actualNamespace, actualAppName, err = cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) -} - -func TestOnPodUpdated(t *testing.T) { - monitor, queue := newMonitor() - - appName := "foo-3" - namespace := "foo-namespace" - oldDriverPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodPending, - }, - } - newDriverPod := oldDriverPod.DeepCopy() - newDriverPod.ResourceVersion = "2" - newDriverPod.Status.Phase = apiv1.PodSucceeded - go monitor.onPodUpdated(oldDriverPod, newDriverPod) - - key, _ := queue.Get() - - actualNamespace, actualAppName, err := cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) - - appName = "foo-4" - oldExecutorPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - sparkExecutorIDLabel: "1", - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - } - newExecutorPod := oldExecutorPod.DeepCopy() - newExecutorPod.ResourceVersion = "2" - 
newExecutorPod.Status.Phase = apiv1.PodFailed - go monitor.onPodUpdated(oldExecutorPod, newExecutorPod) - - key, _ = queue.Get() - - actualNamespace, actualAppName, err = cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) -} - -func TestOnPodDeleted(t *testing.T) { - monitor, queue := newMonitor() - - appName := "foo-5" - namespace := "foo-namespace" - driverPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - } - go monitor.onPodDeleted(driverPod) - - key, _ := queue.Get() - actualNamespace, actualAppName, err := cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) - - appName = "foo-6" - executorPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-exec-1", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - sparkExecutorIDLabel: "1", - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - } - go monitor.onPodDeleted(executorPod) - - key, _ = queue.Get() - actualNamespace, actualAppName, err = cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) -} - -func newMonitor() (*sparkPodEventHandler, workqueue.RateLimitingInterface) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), - "spark-application-controller-test") - monitor := newSparkPodEventHandler(queue.AddRateLimited, nil) - return monitor, queue -} diff --git a/pkg/controller/sparkapplication/sparkapp_metrics.go b/pkg/controller/sparkapplication/sparkapp_metrics.go deleted file mode 100644 index 1dfb309b1..000000000 --- a/pkg/controller/sparkapplication/sparkapp_metrics.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "time" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/util" -) - -type sparkAppMetrics struct { - labels []string - prefix string - - sparkAppCount *prometheus.CounterVec - sparkAppSubmitCount *prometheus.CounterVec - sparkAppSuccessCount *prometheus.CounterVec - sparkAppFailureCount *prometheus.CounterVec - sparkAppFailedSubmissionCount *prometheus.CounterVec - sparkAppRunningCount *util.PositiveGauge - - sparkAppSuccessExecutionTime *prometheus.SummaryVec - sparkAppFailureExecutionTime *prometheus.SummaryVec - sparkAppStartLatency *prometheus.SummaryVec - sparkAppStartLatencyHistogram *prometheus.HistogramVec - - sparkAppExecutorRunningCount *util.PositiveGauge - sparkAppExecutorFailureCount *prometheus.CounterVec - sparkAppExecutorSuccessCount *prometheus.CounterVec -} - -func newSparkAppMetrics(metricsConfig *util.MetricConfig) *sparkAppMetrics { - prefix := metricsConfig.MetricsPrefix - labels := metricsConfig.MetricsLabels - validLabels := make([]string, len(labels)) - for i, label := range labels { - validLabels[i] = util.CreateValidMetricNameLabel("", label) - } - - sparkAppCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_count"), - Help: "Total Number of Spark Apps Handled by the Operator", - }, - validLabels, - ) - sparkAppSubmitCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_submit_count"), - Help: "Spark App Submits via the Operator", - }, - validLabels, - ) - sparkAppSuccessCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_success_count"), - Help: "Spark App Success Count via the Operator", - }, - validLabels, - ) - sparkAppFailureCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_failure_count"), - Help: "Spark App Failure Count via the Operator", - }, - validLabels, - ) - sparkAppFailedSubmissionCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_failed_submission_count"), - Help: "Spark App Failed Submission Count via the Operator", - }, - validLabels, - ) - sparkAppSuccessExecutionTime := prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_success_execution_time_microseconds"), - Help: "Spark App Successful Execution Runtime via the Operator", - }, - validLabels, - ) - sparkAppFailureExecutionTime := prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_failure_execution_time_microseconds"), - Help: "Spark App Failed Execution Runtime via the Operator", - }, - validLabels, - ) - sparkAppStartLatency := prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_start_latency_microseconds"), - Help: "Spark App Start Latency via the Operator", - }, - validLabels, - ) - sparkAppStartLatencyHistogram := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_start_latency_seconds"), - Help: "Spark App Start Latency counts in buckets via the Operator", - Buckets: metricsConfig.MetricsJobStartLatencyBuckets, - }, - 
validLabels, - ) - sparkAppExecutorSuccessCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_executor_success_count"), - Help: "Spark App Successful Executor Count via the Operator", - }, - validLabels, - ) - sparkAppExecutorFailureCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_executor_failure_count"), - Help: "Spark App Failed Executor Count via the Operator", - }, - validLabels, - ) - sparkAppRunningCount := util.NewPositiveGauge(util.CreateValidMetricNameLabel(prefix, "spark_app_running_count"), - "Spark App Running Count via the Operator", validLabels) - sparkAppExecutorRunningCount := util.NewPositiveGauge(util.CreateValidMetricNameLabel(prefix, - "spark_app_executor_running_count"), "Spark App Running Executor Count via the Operator", validLabels) - - return &sparkAppMetrics{ - labels: validLabels, - prefix: prefix, - sparkAppCount: sparkAppCount, - sparkAppSubmitCount: sparkAppSubmitCount, - sparkAppRunningCount: sparkAppRunningCount, - sparkAppSuccessCount: sparkAppSuccessCount, - sparkAppFailureCount: sparkAppFailureCount, - sparkAppFailedSubmissionCount: sparkAppFailedSubmissionCount, - sparkAppSuccessExecutionTime: sparkAppSuccessExecutionTime, - sparkAppFailureExecutionTime: sparkAppFailureExecutionTime, - sparkAppStartLatency: sparkAppStartLatency, - sparkAppStartLatencyHistogram: sparkAppStartLatencyHistogram, - sparkAppExecutorRunningCount: sparkAppExecutorRunningCount, - sparkAppExecutorSuccessCount: sparkAppExecutorSuccessCount, - sparkAppExecutorFailureCount: sparkAppExecutorFailureCount, - } -} - -func (sm *sparkAppMetrics) registerMetrics() { - util.RegisterMetric(sm.sparkAppCount) - util.RegisterMetric(sm.sparkAppSubmitCount) - util.RegisterMetric(sm.sparkAppSuccessCount) - util.RegisterMetric(sm.sparkAppFailureCount) - util.RegisterMetric(sm.sparkAppSuccessExecutionTime) - util.RegisterMetric(sm.sparkAppFailureExecutionTime) - util.RegisterMetric(sm.sparkAppStartLatency) - util.RegisterMetric(sm.sparkAppStartLatencyHistogram) - util.RegisterMetric(sm.sparkAppExecutorSuccessCount) - util.RegisterMetric(sm.sparkAppExecutorFailureCount) - sm.sparkAppRunningCount.Register() - sm.sparkAppExecutorRunningCount.Register() -} - -func (sm *sparkAppMetrics) exportMetricsOnDelete(oldApp *v1beta2.SparkApplication) { - metricLabels := fetchMetricLabels(oldApp, sm.labels) - oldState := oldApp.Status.AppState.State - if oldState == v1beta2.RunningState { - sm.sparkAppRunningCount.Dec(metricLabels) - } - for executor, oldExecState := range oldApp.Status.ExecutorState { - if oldExecState == v1beta2.ExecutorRunningState { - glog.V(2).Infof("Application is deleted. 
Decreasing Running Count for Executor %s.", executor) - sm.sparkAppExecutorRunningCount.Dec(metricLabels) - } - } -} - -func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta2.SparkApplication) { - metricLabels := fetchMetricLabels(newApp, sm.labels) - - oldState := oldApp.Status.AppState.State - newState := newApp.Status.AppState.State - if newState != oldState { - if oldState == v1beta2.NewState { - if m, err := sm.sparkAppCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - } - - switch newState { - case v1beta2.SubmittedState: - if m, err := sm.sparkAppSubmitCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - case v1beta2.RunningState: - sm.sparkAppRunningCount.Inc(metricLabels) - sm.exportJobStartLatencyMetrics(newApp, metricLabels) - case v1beta2.SucceedingState: - if !newApp.Status.LastSubmissionAttemptTime.Time.IsZero() && !newApp.Status.TerminationTime.Time.IsZero() { - d := newApp.Status.TerminationTime.Time.Sub(newApp.Status.LastSubmissionAttemptTime.Time) - if m, err := sm.sparkAppSuccessExecutionTime.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Observe(float64(d / time.Microsecond)) - } - } - sm.sparkAppRunningCount.Dec(metricLabels) - if m, err := sm.sparkAppSuccessCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - case v1beta2.FailingState: - if !newApp.Status.LastSubmissionAttemptTime.Time.IsZero() && !newApp.Status.TerminationTime.Time.IsZero() { - d := newApp.Status.TerminationTime.Time.Sub(newApp.Status.LastSubmissionAttemptTime.Time) - if m, err := sm.sparkAppFailureExecutionTime.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Observe(float64(d / time.Microsecond)) - } - } - sm.sparkAppRunningCount.Dec(metricLabels) - if m, err := sm.sparkAppFailureCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - case v1beta2.FailedSubmissionState: - if m, err := sm.sparkAppFailedSubmissionCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - } - } - - // In the event that state transitions happened too quickly and the spark app skipped the RUNNING state, the job - // start latency should still be captured. - // Note: There is an edge case that a Submitted state can go directly to a Failing state if the driver pod is - // deleted. This is very unlikely if not being done intentionally, so we choose not to handle it. - if newState != oldState { - if (newState == v1beta2.FailingState || newState == v1beta2.SucceedingState) && oldState == v1beta2.SubmittedState { - // TODO: remove this log once we've gathered some data in prod fleets. 
- glog.V(2).Infof("Calculating job start latency metrics for edge case transition from %v to %v in app %v in namespace %v.", oldState, newState, newApp.Name, newApp.Namespace) - sm.exportJobStartLatencyMetrics(newApp, metricLabels) - } - } - - oldExecutorStates := oldApp.Status.ExecutorState - // Potential Executor status updates - for executor, newExecState := range newApp.Status.ExecutorState { - switch newExecState { - case v1beta2.ExecutorRunningState: - if oldExecutorStates[executor] != newExecState { - glog.V(2).Infof("Exporting Metrics for Executor %s. OldState: %v NewState: %v", executor, - oldExecutorStates[executor], newExecState) - sm.sparkAppExecutorRunningCount.Inc(metricLabels) - } - case v1beta2.ExecutorCompletedState: - if oldExecutorStates[executor] != newExecState { - glog.V(2).Infof("Exporting Metrics for Executor %s. OldState: %v NewState: %v", executor, - oldExecutorStates[executor], newExecState) - sm.sparkAppExecutorRunningCount.Dec(metricLabels) - if m, err := sm.sparkAppExecutorSuccessCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - } - case v1beta2.ExecutorFailedState: - if oldExecutorStates[executor] != newExecState { - glog.V(2).Infof("Exporting Metrics for Executor %s. OldState: %v NewState: %v", executor, - oldExecutorStates[executor], newExecState) - sm.sparkAppExecutorRunningCount.Dec(metricLabels) - if m, err := sm.sparkAppExecutorFailureCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - } - } - } -} - -func (sm *sparkAppMetrics) exportJobStartLatencyMetrics(app *v1beta2.SparkApplication, labels map[string]string) { - // Expose the job start latency related metrics of a SparkApp only once, when it runs for the first time - if app.Status.ExecutionAttempts == 1 { - latency := time.Now().Sub(app.CreationTimestamp.Time) - if m, err := sm.sparkAppStartLatency.GetMetricWith(labels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Observe(float64(latency / time.Microsecond)) - } - if m, err := sm.sparkAppStartLatencyHistogram.GetMetricWith(labels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Observe(float64(latency / time.Second)) - } - } -} - -func fetchMetricLabels(app *v1beta2.SparkApplication, labels []string) map[string]string { - // Convert app labels into ones that can be used as metric labels. - validLabels := make(map[string]string) - for labelKey, v := range app.Labels { - newKey := util.CreateValidMetricNameLabel("", labelKey) - validLabels[newKey] = v - } - - metricLabels := make(map[string]string) - for _, label := range labels { - if value, ok := validLabels[label]; ok { - metricLabels[label] = value - } else if label == "namespace" { // If the "namespace" label is in the metrics config, use it. - metricLabels[label] = app.Namespace - } else { - metricLabels[label] = "Unknown" - } - } - return metricLabels -} diff --git a/pkg/controller/sparkapplication/sparkapp_metrics_test.go b/pkg/controller/sparkapplication/sparkapp_metrics_test.go deleted file mode 100644 index a860d7f41..000000000 --- a/pkg/controller/sparkapplication/sparkapp_metrics_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "github.com/kubeflow/spark-operator/pkg/util" - "net/http" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSparkAppMetrics(t *testing.T) { - http.DefaultServeMux = new(http.ServeMux) - // Test with label containing "-". Expect them to be converted to "_". - metricsConfig := &util.MetricConfig{ - MetricsPrefix: "", - MetricsLabels: []string{"app-id", "namespace"}, - MetricsJobStartLatencyBuckets: []float64{30, 60, 90, 120}, - } - metrics := newSparkAppMetrics(metricsConfig) - app1 := map[string]string{"app_id": "test1", "namespace": "default"} - - var wg sync.WaitGroup - wg.Add(1) - go func() { - for i := 0; i < 10; i++ { - metrics.sparkAppCount.With(app1).Inc() - metrics.sparkAppSubmitCount.With(app1).Inc() - metrics.sparkAppRunningCount.Inc(app1) - metrics.sparkAppSuccessCount.With(app1).Inc() - metrics.sparkAppFailureCount.With(app1).Inc() - metrics.sparkAppFailedSubmissionCount.With(app1).Inc() - metrics.sparkAppSuccessExecutionTime.With(app1).Observe(float64(100 * i)) - metrics.sparkAppFailureExecutionTime.With(app1).Observe(float64(500 * i)) - metrics.sparkAppStartLatency.With(app1).Observe(float64(10 * i)) - metrics.sparkAppStartLatencyHistogram.With(app1).Observe(float64(10 * i)) - metrics.sparkAppExecutorRunningCount.Inc(app1) - metrics.sparkAppExecutorSuccessCount.With(app1).Inc() - metrics.sparkAppExecutorFailureCount.With(app1).Inc() - } - for i := 0; i < 5; i++ { - metrics.sparkAppRunningCount.Dec(app1) - metrics.sparkAppExecutorRunningCount.Dec(app1) - } - wg.Done() - }() - - wg.Wait() - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppCount, app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppSubmitCount, app1)) - assert.Equal(t, float64(5), metrics.sparkAppRunningCount.Value(app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppSuccessCount, app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppFailureCount, app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppFailedSubmissionCount, app1)) - assert.Equal(t, float64(5), metrics.sparkAppExecutorRunningCount.Value(app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppExecutorFailureCount, app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppExecutorSuccessCount, app1)) -} diff --git a/pkg/controller/sparkapplication/sparkapp_util.go b/pkg/controller/sparkapplication/sparkapp_util.go deleted file mode 100644 index 3d2a58f07..000000000 --- a/pkg/controller/sparkapplication/sparkapp_util.go +++ /dev/null @@ -1,218 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "encoding/json" - "fmt" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - apiv1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" -) - -// Helper method to create a key with namespace and appName -func createMetaNamespaceKey(namespace, name string) string { - return fmt.Sprintf("%s/%s", namespace, name) -} - -func getAppName(pod *apiv1.Pod) (string, bool) { - appName, ok := pod.Labels[config.SparkAppNameLabel] - return appName, ok -} - -func getSparkApplicationID(pod *apiv1.Pod) string { - return pod.Labels[config.SparkApplicationSelectorLabel] -} - -func getDriverPodName(app *v1beta2.SparkApplication) string { - name := app.Spec.Driver.PodName - if name != nil && len(*name) > 0 { - return *name - } - - sparkConf := app.Spec.SparkConf - if sparkConf[config.SparkDriverPodNameKey] != "" { - return sparkConf[config.SparkDriverPodNameKey] - } - - return fmt.Sprintf("%s-driver", app.Name) -} - -func getUIServiceType(app *v1beta2.SparkApplication) apiv1.ServiceType { - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceType != nil { - return *app.Spec.SparkUIOptions.ServiceType - } - return apiv1.ServiceTypeClusterIP -} - -func getDefaultUIServiceName(app *v1beta2.SparkApplication) string { - return fmt.Sprintf("%s-ui-svc", app.Name) -} - -func getDefaultUIIngressName(app *v1beta2.SparkApplication) string { - return fmt.Sprintf("%s-ui-ingress", app.Name) -} - -func getResourceLabels(app *v1beta2.SparkApplication) map[string]string { - labels := map[string]string{config.SparkAppNameLabel: app.Name} - if app.Status.SubmissionID != "" { - labels[config.SubmissionIDLabel] = app.Status.SubmissionID - } - return labels -} - -func getServiceAnnotations(app *v1beta2.SparkApplication) map[string]string { - serviceAnnotations := map[string]string{} - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceAnnotations != nil { - for key, value := range app.Spec.SparkUIOptions.ServiceAnnotations { - serviceAnnotations[key] = value - } - } - return serviceAnnotations -} - -func getServiceLabels(app *v1beta2.SparkApplication) map[string]string { - serviceLabels := map[string]string{} - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceLabels != nil { - for key, value := range app.Spec.SparkUIOptions.ServiceLabels { - serviceLabels[key] = value - } - } - return serviceLabels -} - -func getIngressResourceAnnotations(app *v1beta2.SparkApplication) map[string]string { - ingressAnnotations := map[string]string{} - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressAnnotations != nil { - for key, value := range app.Spec.SparkUIOptions.IngressAnnotations { - ingressAnnotations[key] = value - } - } - return ingressAnnotations -} - -func getIngressTlsHosts(app *v1beta2.SparkApplication) []networkingv1.IngressTLS { - var ingressTls []networkingv1.IngressTLS - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressTLS != nil { - for _, ingTls := range app.Spec.SparkUIOptions.IngressTLS { - ingressTls = append(ingressTls, ingTls) - } - } - return ingressTls -} - -func podPhaseToExecutorState(podPhase apiv1.PodPhase) v1beta2.ExecutorState { - switch podPhase { - case apiv1.PodPending: - return v1beta2.ExecutorPendingState - case apiv1.PodRunning: - return v1beta2.ExecutorRunningState - case apiv1.PodSucceeded: 
- return v1beta2.ExecutorCompletedState - case apiv1.PodFailed: - return v1beta2.ExecutorFailedState - default: - return v1beta2.ExecutorUnknownState - } -} - -func isExecutorTerminated(executorState v1beta2.ExecutorState) bool { - return executorState == v1beta2.ExecutorCompletedState || executorState == v1beta2.ExecutorFailedState -} - -func isDriverRunning(app *v1beta2.SparkApplication) bool { - return app.Status.AppState.State == v1beta2.RunningState -} - -func getDriverContainerTerminatedState(podStatus apiv1.PodStatus) *apiv1.ContainerStateTerminated { - return getContainerTerminatedState(config.SparkDriverContainerName, podStatus) -} - -func getExecutorContainerTerminatedState(podStatus apiv1.PodStatus) *apiv1.ContainerStateTerminated { - state := getContainerTerminatedState(config.Spark3DefaultExecutorContainerName, podStatus) - if state == nil { - state = getContainerTerminatedState(config.SparkExecutorContainerName, podStatus) - } - return state -} - -func getContainerTerminatedState(name string, podStatus apiv1.PodStatus) *apiv1.ContainerStateTerminated { - for _, c := range podStatus.ContainerStatuses { - if c.Name == name { - if c.State.Terminated != nil { - return c.State.Terminated - } - return nil - } - } - return nil -} - -func podStatusToDriverState(podStatus apiv1.PodStatus) v1beta2.DriverState { - switch podStatus.Phase { - case apiv1.PodPending: - return v1beta2.DriverPendingState - case apiv1.PodRunning: - state := getDriverContainerTerminatedState(podStatus) - if state != nil { - if state.ExitCode == 0 { - return v1beta2.DriverCompletedState - } - return v1beta2.DriverFailedState - } - return v1beta2.DriverRunningState - case apiv1.PodSucceeded: - return v1beta2.DriverCompletedState - case apiv1.PodFailed: - state := getDriverContainerTerminatedState(podStatus) - if state != nil && state.ExitCode == 0 { - return v1beta2.DriverCompletedState - } - return v1beta2.DriverFailedState - default: - return v1beta2.DriverUnknownState - } -} - -func hasDriverTerminated(driverState v1beta2.DriverState) bool { - return driverState == v1beta2.DriverCompletedState || driverState == v1beta2.DriverFailedState -} - -func driverStateToApplicationState(driverState v1beta2.DriverState) v1beta2.ApplicationStateType { - switch driverState { - case v1beta2.DriverPendingState: - return v1beta2.SubmittedState - case v1beta2.DriverCompletedState: - return v1beta2.SucceedingState - case v1beta2.DriverFailedState: - return v1beta2.FailingState - case v1beta2.DriverRunningState: - return v1beta2.RunningState - default: - return v1beta2.UnknownState - } -} - -func printStatus(status *v1beta2.SparkApplicationStatus) (string, error) { - marshalled, err := json.MarshalIndent(status, "", " ") - if err != nil { - return "", err - } - return string(marshalled), nil -} diff --git a/pkg/controller/sparkapplication/sparkapp_util_test.go b/pkg/controller/sparkapplication/sparkapp_util_test.go deleted file mode 100644 index c1605656e..000000000 --- a/pkg/controller/sparkapplication/sparkapp_util_test.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2020 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
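One subtlety in podStatusToDriverState above is worth spelling out: a pod that Kubernetes still reports as Running can already be a completed or failed driver if the driver container has terminated, so the pod phase alone is not trusted. A reduced sketch of just that branch (types simplified; not the operator's API):

package main

import "fmt"

type driverState string

const (
	driverRunning   driverState = "RUNNING"
	driverCompleted driverState = "COMPLETED"
	driverFailed    driverState = "FAILED"
)

// classifyRunningPod reproduces only the PodRunning branch of
// podStatusToDriverState: exitCode is nil while the driver container
// is still alive, and set once it has terminated.
func classifyRunningPod(exitCode *int32) driverState {
	if exitCode == nil {
		return driverRunning
	}
	if *exitCode == 0 {
		return driverCompleted
	}
	return driverFailed
}

func main() {
	code := int32(1)
	fmt.Println(classifyRunningPod(nil))   // RUNNING
	fmt.Println(classifyRunningPod(&code)) // FAILED
}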
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "testing" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -var expectedStatusString = `{ - "sparkApplicationId": "test-app", - "submissionID": "test-app-submission", - "lastSubmissionAttemptTime": null, - "terminationTime": null, - "driverInfo": {}, - "applicationState": { - "state": "COMPLETED" - }, - "executorState": { - "executor-1": "COMPLETED" - } -}` - -func TestPrintStatus(t *testing.T) { - status := &v1beta2.SparkApplicationStatus{ - SparkApplicationID: "test-app", - SubmissionID: "test-app-submission", - AppState: v1beta2.ApplicationState{ - State: v1beta2.CompletedState, - }, - ExecutorState: map[string]v1beta2.ExecutorState{ - "executor-1": v1beta2.ExecutorCompletedState, - }, - } - - statusString, err := printStatus(status) - if err != nil { - t.Fail() - } - - if statusString != expectedStatusString { - t.Errorf("status string\n %s is different from expected status string\n %s", statusString, expectedStatusString) - } -} diff --git a/pkg/controller/sparkapplication/sparkui.go b/pkg/controller/sparkapplication/sparkui.go deleted file mode 100644 index b247974da..000000000 --- a/pkg/controller/sparkapplication/sparkui.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
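The sparkui.go helpers deleted below pick the UI service port in two steps: SparkUIOptions.ServicePort when set, otherwise the spark.ui.port entry from SparkConf, otherwise Spark's default of 4040. A minimal sketch of that precedence (the string key and the 4040 default come from the deleted constants; everything else is simplified):

package main

import (
	"fmt"
	"strconv"
)

const defaultSparkWebUIPort int32 = 4040

// uiServicePort mirrors the precedence in the deleted
// getUIServicePort/getUITargetPort pair: an explicit option beats
// spark.ui.port, which beats the 4040 default. SparkConf values are
// strings, hence the Atoi.
func uiServicePort(optionPort *int32, sparkConf map[string]string) (int32, error) {
	if optionPort != nil {
		return *optionPort, nil
	}
	if s, ok := sparkConf["spark.ui.port"]; ok {
		p, err := strconv.Atoi(s)
		return int32(p), err
	}
	return defaultSparkWebUIPort, nil
}

func main() {
	p, _ := uiServicePort(nil, map[string]string{"spark.ui.port": "4041"})
	fmt.Println(p) // 4041
	p, _ = uiServicePort(nil, nil)
	fmt.Println(p) // 4040
}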
-*/ - -package sparkapplication - -import ( - "fmt" - "net/url" - "strconv" - - clientset "k8s.io/client-go/kubernetes" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/util" -) - -const ( - sparkUIPortConfigurationKey = "spark.ui.port" - defaultSparkWebUIPort int32 = 4040 - defaultSparkWebUIPortName string = "spark-driver-ui-port" -) - -func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { - ingressName := getDefaultUIIngressName(app) - if util.IngressCapabilities.Has("networking.k8s.io/v1") { - return createDriverIngress_v1(app, service, ingressName, ingressURL, ingressClassName, kubeClient) - } else { - return createDriverIngress_legacy(app, service, ingressName, ingressURL, kubeClient) - } -} - -func createSparkUIService( - app *v1beta2.SparkApplication, - kubeClient clientset.Interface) (*SparkService, error) { - portName := getUIServicePortName(app) - port, err := getUIServicePort(app) - if err != nil { - return nil, fmt.Errorf("invalid Spark UI servicePort: %d", port) - } - tPort, err := getUITargetPort(app) - if err != nil { - return nil, fmt.Errorf("invalid Spark UI targetPort: %d", tPort) - } - serviceName := getDefaultUIServiceName(app) - serviceType := getUIServiceType(app) - serviceAnnotations := getServiceAnnotations(app) - serviceLabels := getServiceLabels(app) - return createDriverIngressService(app, portName, port, tPort, serviceName, serviceType, serviceAnnotations, serviceLabels, kubeClient) -} - -// getWebUITargetPort attempts to get the Spark web UI port from configuration property spark.ui.port -// in Spec.SparkConf if it is present, otherwise the default port is returned. -// Note that we don't attempt to get the port from Spec.SparkConfigMap. -func getUITargetPort(app *v1beta2.SparkApplication) (int32, error) { - portStr, ok := app.Spec.SparkConf[sparkUIPortConfigurationKey] - if ok { - port, err := strconv.Atoi(portStr) - return int32(port), err - } - return defaultSparkWebUIPort, nil -} - -func getUIServicePort(app *v1beta2.SparkApplication) (int32, error) { - if app.Spec.SparkUIOptions == nil { - return getUITargetPort(app) - } - port := app.Spec.SparkUIOptions.ServicePort - if port != nil { - return *port, nil - } - return defaultSparkWebUIPort, nil -} - -func getUIServicePortName(app *v1beta2.SparkApplication) string { - if app.Spec.SparkUIOptions == nil { - return defaultSparkWebUIPortName - } - portName := app.Spec.SparkUIOptions.ServicePortName - if portName != nil { - return *portName - } - return defaultSparkWebUIPortName -} diff --git a/pkg/controller/sparkapplication/sparkui_test.go b/pkg/controller/sparkapplication/sparkui_test.go deleted file mode 100644 index 6427aa530..000000000 --- a/pkg/controller/sparkapplication/sparkui_test.go +++ /dev/null @@ -1,673 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
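The sparkui tests deleted below exercise ingress URL formats such as "{{$appName}}.ingress.clusterName.com" and "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}". A sketch of how such a format can be rendered with text/template follows; this is an illustration under that assumption, not the operator's getDriverIngressURL:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// ingressURL renders an ingress URL format for one application by
// declaring the $appName/$appNamespace variables that the formats in
// the deleted tests reference.
func ingressURL(format, name, namespace string) (string, error) {
	tmpl, err := template.New("url").Parse(
		"{{- $appName := .Name }}{{- $appNamespace := .Namespace }}" + format)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, struct{ Name, Namespace string }{name, namespace}); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	u, _ := ingressURL("ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "foo", "default")
	fmt.Println(u) // ingress.clusterName.com/default/foo
}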
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "net/url" - "reflect" - "testing" - - apiv1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes/fake" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -func TestCreateSparkUIService(t *testing.T) { - type testcase struct { - name string - app *v1beta2.SparkApplication - expectedService SparkService - expectedSelector map[string]string - expectError bool - } - testFn := func(test testcase, t *testing.T) { - fakeClient := fake.NewSimpleClientset() - util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} - sparkService, err := createSparkUIService(test.app, fakeClient) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - if sparkService.serviceName != test.expectedService.serviceName { - t.Errorf("%s: for service name wanted %s got %s", test.name, test.expectedService.serviceName, sparkService.serviceName) - } - service, err := fakeClient.CoreV1(). - Services(test.app.Namespace). - Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - if service.Labels[config.SparkAppNameLabel] != test.app.Name { - t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) - } - if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { - t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) - } - if service.Spec.Type != test.expectedService.serviceType { - t.Errorf("%s: for service type wanted %s got %s", test.name, test.expectedService.serviceType, service.Spec.Type) - } - if len(service.Spec.Ports) != 1 { - t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) - } - port := service.Spec.Ports[0] - if port.Port != test.expectedService.servicePort { - t.Errorf("%s: unexpected port wanted %d got %d", test.name, test.expectedService.servicePort, port.Port) - } - if port.Name != test.expectedService.servicePortName { - t.Errorf("%s: unexpected port name wanted %s got %s", test.name, test.expectedService.servicePortName, port.Name) - } - serviceAnnotations := service.ObjectMeta.Annotations - if !reflect.DeepEqual(serviceAnnotations, test.expectedService.serviceAnnotations) { - t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, test.expectedService.serviceAnnotations, serviceAnnotations) - } - serviceLabels := service.ObjectMeta.Labels - if !reflect.DeepEqual(serviceLabels, test.expectedService.serviceLabels) { - t.Errorf("%s: unexpected labels wanted %s got %s", test.name, test.expectedService.serviceLabels, serviceLabels) - } - } - defaultPort := defaultSparkWebUIPort - defaultPortName := defaultSparkWebUIPortName - app1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo1", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkConf: map[string]string{ - sparkUIPortConfigurationKey: "4041", - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - ExecutionAttempts: 1, - }, - } - app2 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo2", - Namespace: "default", - UID: "foo-123", - }, - Status: 
v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-2", - ExecutionAttempts: 2, - }, - } - app3 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo3", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkConf: map[string]string{ - sparkUIPortConfigurationKey: "4041x", - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-3", - }, - } - var appPort int32 = 80 - app4 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo4", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - IngressAnnotations: nil, - IngressTLS: nil, - }, - SparkConf: map[string]string{ - sparkUIPortConfigurationKey: "4041", - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-3", - }, - } - var serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort - app5 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo5", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServiceType: &serviceTypeNodePort, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-2", - ExecutionAttempts: 2, - }, - } - appPortName := "http-spark-test" - app6 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo6", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - ServicePortName: &appPortName, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-6", - }, - } - app7 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo7", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServiceAnnotations: map[string]string{ - "key": "value", - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-7", - ExecutionAttempts: 1, - }, - } - app8 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo8", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServiceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo8", - "key": "value", - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-8", - ExecutionAttempts: 1, - }, - } - testcases := []testcase{ - { - name: "service with custom serviceport and serviceport and target port are same", - app: app1, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app1.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: 4041, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo1", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo1", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with default port", - app: app2, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app2.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: int32(defaultPort), - serviceLabels: map[string]string{ - 
"sparkoperator.k8s.io/app-name": "foo2", - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo2", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom serviceport and serviceport and target port are different", - app: app4, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app4.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: 80, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo4", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo4", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom servicetype", - app: app5, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app5.GetName()), - serviceType: apiv1.ServiceTypeNodePort, - servicePortName: defaultPortName, - servicePort: int32(defaultPort), - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo5", - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo5", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom serviceportname", - app: app6, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app6.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: "http-spark-test", - servicePort: int32(80), - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo6", - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo6", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with annotation", - app: app7, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app7.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: defaultPort, - serviceAnnotations: map[string]string{ - "key": "value", - }, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo7", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo7", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom labels", - app: app8, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app8.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: defaultPort, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo8", - "key": "value", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo8", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with bad port configurations", - app: app3, - expectError: true, - }, - } - for _, test := range testcases { - testFn(test, t) - } -} - -func TestCreateSparkUIIngress(t *testing.T) { - type testcase struct { - name string - app *v1beta2.SparkApplication - expectedIngress SparkIngress - expectError bool - } - - testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { - fakeClient := fake.NewSimpleClientset() - 
sparkService, err := createSparkUIService(test.app, fakeClient) - if err != nil { - t.Fatal(err) - } - ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) - if err != nil { - t.Fatal(err) - } - sparkIngress, err := createSparkUIIngress(test.app, *sparkService, ingressURL, ingressClassName, fakeClient) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - if sparkIngress.ingressName != test.expectedIngress.ingressName { - t.Errorf("Ingress name wanted %s got %s", test.expectedIngress.ingressName, sparkIngress.ingressName) - } - if sparkIngress.ingressURL.String() != test.expectedIngress.ingressURL.String() { - t.Errorf("Ingress URL wanted %s got %s", test.expectedIngress.ingressURL, sparkIngress.ingressURL) - } - ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). - Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - if len(ingress.Annotations) != 0 { - for key, value := range ingress.Annotations { - if test.expectedIngress.annotations[key] != ingress.Annotations[key] { - t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) - } - } - } - if len(ingress.Spec.TLS) != 0 { - for _, ingressTls := range ingress.Spec.TLS { - if ingressTls.Hosts[0] != test.expectedIngress.ingressTLS[0].Hosts[0] { - t.Errorf("Expected ingressTls host: %s but found : %s", test.expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) - } - if ingressTls.SecretName != test.expectedIngress.ingressTLS[0].SecretName { - t.Errorf("Expected ingressTls secretName: %s but found : %s", test.expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) - } - } - } - if ingress.Labels[config.SparkAppNameLabel] != test.app.Name { - t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) - } - - if len(ingress.Spec.Rules) != 1 { - t.Errorf("No Ingress rules found.") - } - ingressRule := ingress.Spec.Rules[0] - // If we have a path, then the ingress adds capture groups - if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { - test.expectedIngress.ingressURL.Path = test.expectedIngress.ingressURL.Path + "(/|$)(.*)" - } - if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path { - - t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path) - } - - if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { - t.Errorf("No Ingress paths found.") - } - ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] - if ingressPath.Backend.Service.Name != sparkService.serviceName { - t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) - } - if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { - t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) - } - if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { - t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) - } - } - - var appPort int32 = 80 - app1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Status: v1beta2.SparkApplicationStatus{ - 
SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app2 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app3 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - IngressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app4 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - }, - IngressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: ""}, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - - testcases := []testcase{ - { - name: "simple ingress object", - app: app1, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), - ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), - }, - expectError: false, - }, - { - name: "ingress with annotations and without tls configuration", - app: app2, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app2.GetName()), - ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - }, - expectError: false, - }, - { - name: "ingress with annotations and tls configuration", - app: app3, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app3.GetName()), - ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - ingressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, - }, - }, - expectError: false, - }, - { - name: "ingress with incomplete list of annotations", - app: app4, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app4.GetName()), - ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - 
"nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - ingressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: ""}, - }, - }, - expectError: true, - }, - } - - for _, test := range testcases { - testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") - } - - testcases = []testcase{ - { - name: "simple ingress object with ingress URL Format with path", - app: app1, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), - ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), - annotations: map[string]string{ - "nginx.ingress.kubernetes.io/rewrite-target": "/$2", - }, - }, - expectError: false, - }, - } - - for _, test := range testcases { - testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") - } - - testcases = []testcase{ - { - name: "simple ingress object with ingressClassName set", - app: app1, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), - ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), - ingressClassName: "nginx", - }, - expectError: false, - }, - } - for _, test := range testcases { - testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") - } -} - -func parseURLAndAssertError(testURL string, t *testing.T) *url.URL { - fallbackURL, _ := url.Parse("http://example.com") - parsedURL, err := url.Parse(testURL) - if err != nil { - t.Errorf("failed to parse the url: %s", testURL) - return fallbackURL - } - if parsedURL.Scheme == "" { - //url does not contain any scheme, adding http:// so url.Parse can function correctly - parsedURL, err = url.Parse("http://" + testURL) - if err != nil { - t.Errorf("failed to parse the url: %s", testURL) - return fallbackURL - } - } - return parsedURL -} diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go deleted file mode 100644 index 2f3fe1dd7..000000000 --- a/pkg/controller/sparkapplication/submission.go +++ /dev/null @@ -1,532 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - - "github.com/golang/glog" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -const ( - sparkHomeEnvVar = "SPARK_HOME" - kubernetesServiceHostEnvVar = "KUBERNETES_SERVICE_HOST" - kubernetesServicePortEnvVar = "KUBERNETES_SERVICE_PORT" -) - -// submission includes information of a Spark application to be submitted. 
-type submission struct { - namespace string - name string - args []string -} - -func newSubmission(args []string, app *v1beta2.SparkApplication) *submission { - return &submission{ - namespace: app.Namespace, - name: app.Name, - args: args, - } -} - -func runSparkSubmit(submission *submission) (bool, error) { - sparkHome, present := os.LookupEnv(sparkHomeEnvVar) - if !present { - glog.Error("SPARK_HOME is not specified") - } - command := filepath.Join(sparkHome, "/bin/spark-submit") - - cmd := execCommand(command, submission.args...) - glog.V(2).Infof("spark-submit arguments: %v", cmd.Args) - output, err := cmd.Output() - glog.V(3).Infof("spark-submit output: %s", string(output)) - if err != nil { - var errorMsg string - if exitErr, ok := err.(*exec.ExitError); ok { - errorMsg = string(exitErr.Stderr) - } - // The driver pod of the application already exists. - if strings.Contains(errorMsg, podAlreadyExistsErrorCode) { - glog.Warningf("trying to resubmit an already submitted SparkApplication %s/%s", submission.namespace, submission.name) - return false, nil - } - if errorMsg != "" { - return false, fmt.Errorf("failed to run spark-submit for SparkApplication %s/%s: %s", submission.namespace, submission.name, errorMsg) - } - return false, fmt.Errorf("failed to run spark-submit for SparkApplication %s/%s: %v", submission.namespace, submission.name, err) - } - - return true, nil -} - -func buildSubmissionCommandArgs(app *v1beta2.SparkApplication, driverPodName string, submissionID string) ([]string, error) { - var args []string - if app.Spec.MainClass != nil { - args = append(args, "--class", *app.Spec.MainClass) - } - masterURL, err := getMasterURL() - if err != nil { - return nil, err - } - - args = append(args, "--master", masterURL) - args = append(args, "--deploy-mode", string(app.Spec.Mode)) - - // Add proxy user - if app.Spec.ProxyUser != nil { - args = append(args, "--proxy-user", *app.Spec.ProxyUser) - } - - args = append(args, "--conf", fmt.Sprintf("%s=%s", config.SparkAppNamespaceKey, app.Namespace)) - args = append(args, "--conf", fmt.Sprintf("%s=%s", config.SparkAppNameKey, app.Name)) - args = append(args, "--conf", fmt.Sprintf("%s=%s", config.SparkDriverPodNameKey, driverPodName)) - - // Add application dependencies. - args = append(args, addDependenciesConfOptions(app)...) - - if app.Spec.Image != nil { - args = append(args, "--conf", - fmt.Sprintf("%s=%s", config.SparkContainerImageKey, *app.Spec.Image)) - } - if app.Spec.ImagePullPolicy != nil { - args = append(args, "--conf", - fmt.Sprintf("%s=%s", config.SparkContainerImagePullPolicyKey, *app.Spec.ImagePullPolicy)) - } - if len(app.Spec.ImagePullSecrets) > 0 { - secretNames := strings.Join(app.Spec.ImagePullSecrets, ",") - args = append(args, "--conf", fmt.Sprintf("%s=%s", config.SparkImagePullSecretKey, secretNames)) - } - if app.Spec.PythonVersion != nil { - args = append(args, "--conf", - fmt.Sprintf("%s=%s", config.SparkPythonVersion, *app.Spec.PythonVersion)) - } - if app.Spec.MemoryOverheadFactor != nil { - args = append(args, "--conf", - fmt.Sprintf("%s=%s", config.SparkMemoryOverheadFactor, *app.Spec.MemoryOverheadFactor)) - } - - // Operator triggered spark-submit should never wait for App completion - args = append(args, "--conf", fmt.Sprintf("%s=false", config.SparkWaitAppCompletion)) - - // Add Spark configuration properties. - for key, value := range app.Spec.SparkConf { - // Configuration property for the driver pod name has already been set. 
- if key != config.SparkDriverPodNameKey { - args = append(args, "--conf", fmt.Sprintf("%s=%s", key, value)) - } - } - - // Add Hadoop configuration properties. - for key, value := range app.Spec.HadoopConf { - args = append(args, "--conf", fmt.Sprintf("spark.hadoop.%s=%s", key, value)) - } - - // Add the driver and executor configuration options. - // Note that when the controller submits the application, it expects that all dependencies are local - // so init-container is not needed and therefore no init-container image needs to be specified. - options, err := addDriverConfOptions(app, submissionID) - if err != nil { - return nil, err - } - for _, option := range options { - args = append(args, "--conf", option) - } - options, err = addExecutorConfOptions(app, submissionID) - if err != nil { - return nil, err - } - for _, option := range options { - args = append(args, "--conf", option) - } - - options = addDynamicAllocationConfOptions(app) - for _, option := range options { - args = append(args, "--conf", option) - } - - for key, value := range app.Spec.NodeSelector { - conf := fmt.Sprintf("%s%s=%s", config.SparkNodeSelectorKeyPrefix, key, value) - args = append(args, "--conf", conf) - } - - if app.Spec.Volumes != nil { - options, err = addLocalDirConfOptions(app) - if err != nil { - return nil, err - } - - for _, option := range options { - args = append(args, "--conf", option) - } - } - - if app.Spec.MainApplicationFile != nil { - // Add the main application file if it is present. - args = append(args, *app.Spec.MainApplicationFile) - } - - // Add application arguments. - for _, argument := range app.Spec.Arguments { - args = append(args, argument) - } - - return args, nil -} - -func getMasterURL() (string, error) { - kubernetesServiceHost := os.Getenv(kubernetesServiceHostEnvVar) - if kubernetesServiceHost == "" { - return "", fmt.Errorf("environment variable %s is not found", kubernetesServiceHostEnvVar) - } - kubernetesServicePort := os.Getenv(kubernetesServicePortEnvVar) - if kubernetesServicePort == "" { - return "", fmt.Errorf("environment variable %s is not found", kubernetesServicePortEnvVar) - } - // check if the host is IPv6 address - if strings.Contains(kubernetesServiceHost, ":") && !strings.HasPrefix(kubernetesServiceHost, "[") { - return fmt.Sprintf("k8s://https://[%s]:%s", kubernetesServiceHost, kubernetesServicePort), nil - } - return fmt.Sprintf("k8s://https://%s:%s", kubernetesServiceHost, kubernetesServicePort), nil -} - -func getOwnerReference(app *v1beta2.SparkApplication) *metav1.OwnerReference { - controller := true - return &metav1.OwnerReference{ - APIVersion: v1beta2.SchemeGroupVersion.String(), - Kind: reflect.TypeOf(v1beta2.SparkApplication{}).Name(), - Name: app.Name, - UID: app.UID, - Controller: &controller, - } -} - -func addDependenciesConfOptions(app *v1beta2.SparkApplication) []string { - var depsConfOptions []string - - if len(app.Spec.Deps.Jars) > 0 { - depsConfOptions = append(depsConfOptions, "--jars", strings.Join(app.Spec.Deps.Jars, ",")) - } - if len(app.Spec.Deps.Files) > 0 { - depsConfOptions = append(depsConfOptions, "--files", strings.Join(app.Spec.Deps.Files, ",")) - } - if len(app.Spec.Deps.PyFiles) > 0 { - depsConfOptions = append(depsConfOptions, "--py-files", strings.Join(app.Spec.Deps.PyFiles, ",")) - } - if len(app.Spec.Deps.Packages) > 0 { - depsConfOptions = append(depsConfOptions, "--packages", strings.Join(app.Spec.Deps.Packages, ",")) - } - if len(app.Spec.Deps.ExcludePackages) > 0 { - depsConfOptions = append(depsConfOptions, 
"--exclude-packages", strings.Join(app.Spec.Deps.ExcludePackages, ",")) - } - if len(app.Spec.Deps.Repositories) > 0 { - depsConfOptions = append(depsConfOptions, "--repositories", strings.Join(app.Spec.Deps.Repositories, ",")) - } - - return depsConfOptions -} - -func addDriverConfOptions(app *v1beta2.SparkApplication, submissionID string) ([]string, error) { - var driverConfOptions []string - - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverLabelKeyPrefix, config.SparkAppNameLabel, app.Name)) - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverLabelKeyPrefix, config.LaunchedBySparkOperatorLabel, "true")) - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverLabelKeyPrefix, config.SubmissionIDLabel, submissionID)) - - if app.Spec.Driver.Image != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverContainerImageKey, *app.Spec.Driver.Image)) - } - - if app.Spec.Driver.Cores != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("spark.driver.cores=%d", *app.Spec.Driver.Cores)) - } - if app.Spec.Driver.CoreRequest != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverCoreRequestKey, *app.Spec.Driver.CoreRequest)) - } - if app.Spec.Driver.CoreLimit != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverCoreLimitKey, *app.Spec.Driver.CoreLimit)) - } - if app.Spec.Driver.Memory != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("spark.driver.memory=%s", *app.Spec.Driver.Memory)) - } - if app.Spec.Driver.MemoryOverhead != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("spark.driver.memoryOverhead=%s", *app.Spec.Driver.MemoryOverhead)) - } - - if app.Spec.Driver.ServiceAccount != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverServiceAccountName, *app.Spec.Driver.ServiceAccount)) - } - - if app.Spec.Driver.JavaOptions != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverJavaOptions, *app.Spec.Driver.JavaOptions)) - } - - if app.Spec.Driver.KubernetesMaster != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverKubernetesMaster, *app.Spec.Driver.KubernetesMaster)) - } - - // Populate SparkApplication Labels to Driver - driverLabels := make(map[string]string) - for key, value := range app.Labels { - driverLabels[key] = value - } - for key, value := range app.Spec.Driver.Labels { - driverLabels[key] = value - } - - for key, value := range driverLabels { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverLabelKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Driver.Annotations { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverAnnotationKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Driver.EnvSecretKeyRefs { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s:%s", config.SparkDriverSecretKeyRefKeyPrefix, key, value.Name, value.Key)) - } - - for key, value := range app.Spec.Driver.ServiceAnnotations { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverServiceAnnotationKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Driver.ServiceLabels { - driverConfOptions = 
append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverServiceLabelKeyPrefix, key, value)) - } - - driverConfOptions = append(driverConfOptions, config.GetDriverSecretConfOptions(app)...) - driverConfOptions = append(driverConfOptions, config.GetDriverEnvVarConfOptions(app)...) - - return driverConfOptions, nil -} - -func addExecutorConfOptions(app *v1beta2.SparkApplication, submissionID string) ([]string, error) { - var executorConfOptions []string - - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorLabelKeyPrefix, config.SparkAppNameLabel, app.Name)) - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorLabelKeyPrefix, config.LaunchedBySparkOperatorLabel, "true")) - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorLabelKeyPrefix, config.SubmissionIDLabel, submissionID)) - - if app.Spec.Executor.Instances != nil { - conf := fmt.Sprintf("spark.executor.instances=%d", *app.Spec.Executor.Instances) - executorConfOptions = append(executorConfOptions, conf) - } - - if app.Spec.Executor.Image != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorContainerImageKey, *app.Spec.Executor.Image)) - } - - if app.Spec.Executor.Cores != nil { - // Property "spark.executor.cores" does not allow float values. - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("spark.executor.cores=%d", int32(*app.Spec.Executor.Cores))) - } - if app.Spec.Executor.CoreRequest != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorCoreRequestKey, *app.Spec.Executor.CoreRequest)) - } - if app.Spec.Executor.CoreLimit != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorCoreLimitKey, *app.Spec.Executor.CoreLimit)) - } - if app.Spec.Executor.Memory != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("spark.executor.memory=%s", *app.Spec.Executor.Memory)) - } - if app.Spec.Executor.MemoryOverhead != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("spark.executor.memoryOverhead=%s", *app.Spec.Executor.MemoryOverhead)) - } - - if app.Spec.Executor.ServiceAccount != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorAccountName, *app.Spec.Executor.ServiceAccount)) - } - - if app.Spec.Executor.DeleteOnTermination != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%t", config.SparkExecutorDeleteOnTermination, *app.Spec.Executor.DeleteOnTermination)) - } - - // Populate SparkApplication Labels to Executors - executorLabels := make(map[string]string) - for key, value := range app.Labels { - executorLabels[key] = value - } - for key, value := range app.Spec.Executor.Labels { - executorLabels[key] = value - } - for key, value := range executorLabels { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorLabelKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Executor.Annotations { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorAnnotationKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Executor.EnvSecretKeyRefs { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s:%s", config.SparkExecutorSecretKeyRefKeyPrefix, key, value.Name, value.Key)) - } - - if 
app.Spec.Executor.JavaOptions != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorJavaOptions, *app.Spec.Executor.JavaOptions)) - } - - executorConfOptions = append(executorConfOptions, config.GetExecutorSecretConfOptions(app)...) - executorConfOptions = append(executorConfOptions, config.GetExecutorEnvVarConfOptions(app)...) - - return executorConfOptions, nil -} - -func addDynamicAllocationConfOptions(app *v1beta2.SparkApplication) []string { - if app.Spec.DynamicAllocation == nil { - return nil - } - - dynamicAllocation := app.Spec.DynamicAllocation - if !dynamicAllocation.Enabled { - return nil - } - - var options []string - options = append(options, fmt.Sprintf("%s=true", config.SparkDynamicAllocationEnabled)) - // Turn on shuffle tracking if dynamic allocation is enabled. - options = append(options, fmt.Sprintf("%s=true", config.SparkDynamicAllocationShuffleTrackingEnabled)) - if dynamicAllocation.InitialExecutors != nil { - options = append(options, fmt.Sprintf("%s=%d", config.SparkDynamicAllocationInitialExecutors, *dynamicAllocation.InitialExecutors)) - } - if dynamicAllocation.MinExecutors != nil { - options = append(options, fmt.Sprintf("%s=%d", config.SparkDynamicAllocationMinExecutors, *dynamicAllocation.MinExecutors)) - } - if dynamicAllocation.MaxExecutors != nil { - options = append(options, fmt.Sprintf("%s=%d", config.SparkDynamicAllocationMaxExecutors, *dynamicAllocation.MaxExecutors)) - } - if dynamicAllocation.ShuffleTrackingTimeout != nil { - options = append(options, fmt.Sprintf("%s=%d", config.SparkDynamicAllocationShuffleTrackingTimeout, *dynamicAllocation.ShuffleTrackingTimeout)) - } - - return options -} - -// addLocalDirConfOptions excludes local dir volumes, update SparkApplication and returns local dir config options -func addLocalDirConfOptions(app *v1beta2.SparkApplication) ([]string, error) { - var localDirConfOptions []string - - sparkLocalVolumes := map[string]v1.Volume{} - var mutateVolumes []v1.Volume - - // Filter local dir volumes - for _, volume := range app.Spec.Volumes { - if strings.HasPrefix(volume.Name, config.SparkLocalDirVolumePrefix) { - sparkLocalVolumes[volume.Name] = volume - } else { - mutateVolumes = append(mutateVolumes, volume) - } - } - app.Spec.Volumes = mutateVolumes - - // Filter local dir volumeMounts and set mutate volume mounts to driver and executor - if app.Spec.Driver.VolumeMounts != nil { - driverMutateVolumeMounts, driverLocalDirConfConfOptions := filterMutateMountVolumes(app.Spec.Driver.VolumeMounts, config.SparkDriverVolumesPrefix, sparkLocalVolumes) - app.Spec.Driver.VolumeMounts = driverMutateVolumeMounts - localDirConfOptions = append(localDirConfOptions, driverLocalDirConfConfOptions...) - } - - if app.Spec.Executor.VolumeMounts != nil { - executorMutateVolumeMounts, executorLocalDirConfConfOptions := filterMutateMountVolumes(app.Spec.Executor.VolumeMounts, config.SparkExecutorVolumesPrefix, sparkLocalVolumes) - app.Spec.Executor.VolumeMounts = executorMutateVolumeMounts - localDirConfOptions = append(localDirConfOptions, executorLocalDirConfConfOptions...) 
- } - - return localDirConfOptions, nil -} - -func filterMutateMountVolumes(volumeMounts []v1.VolumeMount, prefix string, sparkLocalVolumes map[string]v1.Volume) ([]v1.VolumeMount, []string) { - var mutateMountVolumes []v1.VolumeMount - var localDirConfOptions []string - for _, volumeMount := range volumeMounts { - if volume, ok := sparkLocalVolumes[volumeMount.Name]; ok { - options := buildLocalVolumeOptions(prefix, volume, volumeMount) - for _, option := range options { - localDirConfOptions = append(localDirConfOptions, option) - } - } else { - mutateMountVolumes = append(mutateMountVolumes, volumeMount) - } - } - - return mutateMountVolumes, localDirConfOptions -} - -func buildLocalVolumeOptions(prefix string, volume v1.Volume, volumeMount v1.VolumeMount) []string { - VolumeMountPathTemplate := prefix + "%s.%s.mount.path=%s" - VolumeMountOptionTemplate := prefix + "%s.%s.options.%s=%s" - - var options []string - switch { - case volume.HostPath != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "hostPath", volume.Name, volumeMount.MountPath)) - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "hostPath", volume.Name, "path", volume.HostPath.Path)) - if volume.HostPath.Type != nil { - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "hostPath", volume.Name, "type", *volume.HostPath.Type)) - } - case volume.EmptyDir != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "emptyDir", volume.Name, volumeMount.MountPath)) - if volume.EmptyDir.SizeLimit != nil { - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "emptyDir", volume.Name, "sizeLimit", volume.EmptyDir.SizeLimit.String())) - } - case volume.PersistentVolumeClaim != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "persistentVolumeClaim", volume.Name, volumeMount.MountPath)) - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "persistentVolumeClaim", volume.Name, "claimName", volume.PersistentVolumeClaim.ClaimName)) - } - - return options -} diff --git a/pkg/controller/sparkapplication/submission_test.go b/pkg/controller/sparkapplication/submission_test.go deleted file mode 100644 index 16c6a1716..000000000 --- a/pkg/controller/sparkapplication/submission_test.go +++ /dev/null @@ -1,695 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
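Among the submission helpers deleted above, getMasterURL has one easy-to-miss detail: when KUBERNETES_SERVICE_HOST holds a bare IPv6 address it must be bracketed before the port is appended, or the resulting k8s:// master URL is unparsable. A standalone sketch of that logic:

package main

import (
	"fmt"
	"strings"
)

// masterURL reproduces the deleted getMasterURL behavior once the
// KUBERNETES_SERVICE_HOST / KUBERNETES_SERVICE_PORT values are in
// hand: a host containing ":" is an IPv6 literal and gets brackets.
func masterURL(host, port string) string {
	if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") {
		return fmt.Sprintf("k8s://https://[%s]:%s", host, port)
	}
	return fmt.Sprintf("k8s://https://%s:%s", host, port)
}

func main() {
	fmt.Println(masterURL("10.0.0.1", "443")) // k8s://https://10.0.0.1:443
	fmt.Println(masterURL("fd00::1", "443"))  // k8s://https://[fd00::1]:443
}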
-*/ - -package sparkapplication - -import ( - "fmt" - "os" - "reflect" - "sort" - "strconv" - "testing" - - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -const ( - VolumeMountPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.mount.path=%s" - VolumeMountOptionPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.options.%s=%s" - SparkDriverLabelAnnotationTemplate = "spark.kubernetes.driver.label.sparkoperator.k8s.io/%s=%s" - SparkDriverLabelTemplate = "spark.kubernetes.driver.label.%s=%s" - SparkDriverServiceLabelTemplate = "spark.kubernetes.driver.service.label.%s=%s" - SparkExecutorLabelAnnotationTemplate = "spark.kubernetes.executor.label.sparkoperator.k8s.io/%s=%s" - SparkExecutorLabelTemplate = "spark.kubernetes.executor.label.%s=%s" -) - -func TestAddLocalDir_HostPath(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 2, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) -} - -func TestAddLocalDir_PVC(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "/tmp/mnt-1", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 2, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "persistentVolumeClaim", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "persistentVolumeClaim", volumes[0].Name, "claimName", volumes[0].PersistentVolumeClaim.ClaimName), localDirOptions[1]) -} - -func TestAddLocalDir_MixedVolumes(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: 
"spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt-1", - }, - }, - }, - { - Name: "log-dir", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/log/spark", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - { - Name: "log-dir", - MountPath: "/var/log/spark", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 1, len(app.Spec.Volumes)) - assert.Equal(t, 1, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 2, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) -} - -func TestAddLocalDir_MultipleScratchVolumes(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt-1", - }, - }, - }, - { - Name: "spark-local-dir-2", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt-2", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - { - Name: "spark-local-dir-2", - MountPath: "/tmp/mnt-2", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 4, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[1].Name, volumeMounts[1].MountPath), localDirOptions[2]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[1].Name, "path", volumes[1].HostPath.Path), localDirOptions[3]) -} - -func TestAddLocalDir_Executor(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Executor: v1beta2.ExecutorSpec{ - 
SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) - assert.Equal(t, 2, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) -} - -func TestAddLocalDir_Driver_Executor(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt", - }, - }, - }, - { - Name: "test-volume", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/test", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - { - Name: "test-volume", - MountPath: "/tmp/test", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 1, len(app.Spec.Volumes)) - assert.Equal(t, 1, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 1, len(app.Spec.Executor.VolumeMounts)) - assert.Equal(t, 4, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[3]) -} - -func TestAddEmptyDir_Driver_Executor_WithSizeLimit(t *testing.T) { - sizeLimit := resource.MustParse("5Gi") - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{ - SizeLimit: &sizeLimit, - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) - 
assert.Equal(t, 4, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[1]) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[3]) -} - -func TestPopulateLabels_Driver_Executor(t *testing.T) { - const ( - AppLabelKey = "app-label-key" - AppLabelValue = "app-label-value" - DriverLabelKey = "driver-label-key" - DriverLabelValue = "driver-label-key" - DriverServiceLabelKey = "driver-svc-label-key" - DriverServiceLabelValue = "driver-svc-label-value" - ExecutorLabelKey = "executor-label-key" - ExecutorLabelValue = "executor-label-key" - ) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - Labels: map[string]string{AppLabelKey: AppLabelValue}, - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - ServiceLabels: map[string]string{DriverServiceLabelKey: DriverServiceLabelValue}, - SparkPodSpec: v1beta2.SparkPodSpec{ - Labels: map[string]string{DriverLabelKey: DriverLabelValue}, - }, - }, - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Labels: map[string]string{ExecutorLabelKey: ExecutorLabelValue}, - }, - }, - }, - } - - submissionID := uuid.New().String() - driverOptions, err := addDriverConfOptions(app, submissionID) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, 6, len(driverOptions)) - sort.Strings(driverOptions) - expectedDriverLabels := []string{ - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "app-name", "spark-test"), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), - fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, AppLabelValue), - fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), - fmt.Sprintf(SparkDriverServiceLabelTemplate, DriverServiceLabelKey, DriverServiceLabelValue), - } - sort.Strings(expectedDriverLabels) - - if !reflect.DeepEqual(expectedDriverLabels, driverOptions) { - t.Errorf("Executor labels: wanted %+q got %+q", expectedDriverLabels, driverOptions) - } - - executorOptions, err := addExecutorConfOptions(app, submissionID) - sort.Strings(executorOptions) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, 5, len(executorOptions)) - expectedExecutorLabels := []string{ - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "app-name", "spark-test"), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "submission-id", submissionID), - fmt.Sprintf(SparkExecutorLabelTemplate, AppLabelKey, AppLabelValue), - fmt.Sprintf(SparkExecutorLabelTemplate, ExecutorLabelKey, ExecutorLabelValue), - } - sort.Strings(expectedExecutorLabels) - - if !reflect.DeepEqual(expectedExecutorLabels, executorOptions) { - t.Errorf("Executor labels: wanted %+q got %+q", expectedExecutorLabels, executorOptions) - } -} - -func TestPopulateLabelsOverride_Driver_Executor(t 
*testing.T) { - const ( - AppLabelKey = "app-label-key" - AppLabelValue = "app-label-value" - DriverLabelKey = "driver-label-key" - DriverLabelValue = "driver-label-key" - DriverAppLabelOverride = "driver-app-label-override" - ExecutorLabelKey = "executor-label-key" - ExecutorLabelValue = "executor-label-key" - ExecutorAppLabelOverride = "executor-app-label-override" - ) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - Labels: map[string]string{AppLabelKey: AppLabelValue}, - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Labels: map[string]string{DriverLabelKey: DriverLabelValue, AppLabelKey: DriverAppLabelOverride}, - }, - }, - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Labels: map[string]string{ExecutorLabelKey: ExecutorLabelValue, AppLabelKey: ExecutorAppLabelOverride}, - }, - }, - }, - } - - submissionID := uuid.New().String() - driverOptions, err := addDriverConfOptions(app, submissionID) - if err != nil { - t.Fatal(err) - } - sort.Strings(driverOptions) - assert.Equal(t, 5, len(driverOptions)) - expectedDriverLabels := []string{ - fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, DriverAppLabelOverride), - fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "app-name", "spark-test"), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), - } - sort.Strings(expectedDriverLabels) - - if !reflect.DeepEqual(expectedDriverLabels, driverOptions) { - t.Errorf("Executor labels: wanted %+q got %+q", expectedDriverLabels, driverOptions) - } - - executorOptions, err := addExecutorConfOptions(app, submissionID) - if err != nil { - t.Fatal(err) - } - sort.Strings(executorOptions) - assert.Equal(t, 5, len(executorOptions)) - expectedExecutorLabels := []string{ - fmt.Sprintf(SparkExecutorLabelTemplate, AppLabelKey, ExecutorAppLabelOverride), - fmt.Sprintf(SparkExecutorLabelTemplate, ExecutorLabelKey, ExecutorLabelValue), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "app-name", "spark-test"), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "submission-id", submissionID), - } - sort.Strings(expectedExecutorLabels) - - if !reflect.DeepEqual(expectedExecutorLabels, executorOptions) { - t.Errorf("Executor labels: wanted %+q got %+q", expectedExecutorLabels, executorOptions) - } -} - -func TestDynamicAllocationOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{}, - } - options := addDynamicAllocationConfOptions(app) - assert.Equal(t, 0, len(options)) - - app = &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - DynamicAllocation: &v1beta2.DynamicAllocation{ - Enabled: true, - InitialExecutors: int32ptr(2), - MinExecutors: int32ptr(0), - MaxExecutors: int32ptr(10), - ShuffleTrackingTimeout: int64ptr(6000000), - }, - }, - } - - options = addDynamicAllocationConfOptions(app) - assert.Equal(t, 6, len(options)) - assert.Equal(t, fmt.Sprintf("%s=true", config.SparkDynamicAllocationEnabled), options[0]) - 
assert.Equal(t, fmt.Sprintf("%s=true", config.SparkDynamicAllocationShuffleTrackingEnabled), options[1]) - assert.Equal(t, fmt.Sprintf("%s=2", config.SparkDynamicAllocationInitialExecutors), options[2]) - assert.Equal(t, fmt.Sprintf("%s=0", config.SparkDynamicAllocationMinExecutors), options[3]) - assert.Equal(t, fmt.Sprintf("%s=10", config.SparkDynamicAllocationMaxExecutors), options[4]) - assert.Equal(t, fmt.Sprintf("%s=6000000", config.SparkDynamicAllocationShuffleTrackingTimeout), options[5]) -} - -func TestProxyUserArg(t *testing.T) { - const ( - host = "localhost" - port = "6443" - ) - - if err := os.Setenv(kubernetesServiceHostEnvVar, host); err != nil { - t.Fatal(err) - } - if err := os.Setenv(kubernetesServicePortEnvVar, port); err != nil { - t.Fatal(err) - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Mode: v1beta2.ClusterMode, - ProxyUser: stringptr("foo"), - }, - } - - submissionID := uuid.New().String() - driverPodName := getDriverPodName(app) - args, err := buildSubmissionCommandArgs(app, driverPodName, submissionID) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "--master", args[0]) - assert.Equal(t, fmt.Sprintf("k8s://https://%s:%s", host, port), args[1]) - assert.Equal(t, "--deploy-mode", args[2]) - assert.Equal(t, string(v1beta2.ClusterMode), args[3]) - assert.Equal(t, "--proxy-user", args[4]) - assert.Equal(t, "foo", args[5]) -} - -func Test_getMasterURL(t *testing.T) { - setEnv := func(host string, port string) { - if err := os.Setenv(kubernetesServiceHostEnvVar, host); err != nil { - t.Fatal(err) - } - if err := os.Setenv(kubernetesServicePortEnvVar, port); err != nil { - t.Fatal(err) - } - } - - tests := []struct { - name string - host string - port string - want string - wantErr assert.ErrorAssertionFunc - }{ - { - name: "should return a valid master url when IPv4 address is used", - host: "localhost", - port: "6443", - want: "k8s://https://localhost:6443", - wantErr: assert.NoError, - }, - { - name: "should return a valid master url when IPv6 address is used", - host: "::1", - port: "6443", - want: "k8s://https://[::1]:6443", - wantErr: assert.NoError, - }, - { - name: "should throw an error when the host is empty", - host: "", - port: "6443", - want: "", - wantErr: assert.Error, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - setEnv(tt.host, tt.port) - got, err := getMasterURL() - if !tt.wantErr(t, err, fmt.Sprintf("getMasterURL()")) { - return - } - assert.Equalf(t, tt.want, got, "getMasterURL()") - }) - } -} diff --git a/pkg/util/capabilities.go b/pkg/util/capabilities.go index 5040da6e9..068bdcb4d 100644 --- a/pkg/util/capabilities.go +++ b/pkg/util/capabilities.go @@ -38,6 +38,19 @@ func (c Capabilities) String() string { return strings.Join(keys, ", ") } +var ( + IngressCapabilities Capabilities +) + +func InitializeIngressCapabilities(client kubernetes.Interface) (err error) { + if IngressCapabilities != nil { + return + } + + IngressCapabilities, err = getPreferredAvailableAPIs(client, "Ingress") + return +} + // getPreferredAvailableAPIs queries the cluster for the preferred resources information and returns a Capabilities // instance containing those api groups that support the specified kind. 
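[Editor's note: usage sketch, not part of this patch. InitializeIngressCapabilities
is meant to run once at operator startup, before anything reads
IngressCapabilities; a minimal sketch, assuming an already-constructed
kubernetes.Interface named "clientset" (a hypothetical name) and the glog logger
used elsewhere in this codebase:

	if err := util.InitializeIngressCapabilities(clientset); err != nil {
		glog.Fatalf("failed to determine Ingress capabilities: %v", err)
	}
	glog.Infof("supported Ingress API groups: %s", util.IngressCapabilities.String())

End of editor's note.]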
// @@ -70,15 +83,3 @@ func getPreferredAvailableAPIs(client kubernetes.Interface, kind string) (Capabi return caps, nil } - -var ( - IngressCapabilities Capabilities -) - -func InitializeIngressCapabilities(client kubernetes.Interface) (err error) { - if IngressCapabilities != nil { - return - } - IngressCapabilities, err = getPreferredAvailableAPIs(client, "Ingress") - return -} diff --git a/pkg/util/cert_test.go b/pkg/util/cert_test.go deleted file mode 100644 index 700bc234d..000000000 --- a/pkg/util/cert_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package util - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "testing" - "time" - - "k8s.io/client-go/util/cert" -) - -func TestNewPrivateKey(t *testing.T) { - _, err := NewPrivateKey() - if err != nil { - t.Errorf("failed to generate private key: %v", err) - } -} - -func TestNewSignedServerCert(t *testing.T) { - cfg := cert.Config{ - CommonName: "test-server", - Organization: []string{"test-org"}, - NotBefore: time.Now(), - } - - caKey, _ := rsa.GenerateKey(rand.Reader, RSAKeySize) - caCert := &x509.Certificate{} - serverKey, _ := rsa.GenerateKey(rand.Reader, RSAKeySize) - - serverCert, err := NewSignedServerCert(cfg, caKey, caCert, serverKey) - if err != nil { - t.Errorf("failed to generate signed server certificate: %v", err) - } - - if serverCert == nil { - t.Error("server certificate is nil") - } -} diff --git a/pkg/util/histogram_buckets.go b/pkg/util/histogram_buckets.go deleted file mode 100644 index 0cdf25da2..000000000 --- a/pkg/util/histogram_buckets.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "strconv" - "strings" -) - -var DefaultJobStartLatencyBuckets = []float64{30, 60, 90, 120, 150, 180, 210, 240, 270, 300} - -type HistogramBuckets []float64 - -func (hb *HistogramBuckets) String() string { - return fmt.Sprint(*hb) -} - -func (hb *HistogramBuckets) Set(value string) error { - *hb = nil - for _, boundaryStr := range strings.Split(value, ",") { - boundary, err := strconv.ParseFloat(strings.TrimSpace(boundaryStr), 64) - if err != nil { - return err - } - *hb = append(*hb, boundary) - } - return nil -} diff --git a/pkg/util/metrics.go b/pkg/util/metrics.go index 81cb14573..fe87508d9 100644 --- a/pkg/util/metrics.go +++ b/pkg/util/metrics.go @@ -17,201 +17,10 @@ limitations under the License. package util import ( - "fmt" - "net/http" "strings" - "sync" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - prometheusmodel "github.com/prometheus/client_model/go" - - "k8s.io/client-go/util/workqueue" ) func CreateValidMetricNameLabel(prefix, name string) string { // "-" is not a valid character for prometheus metric names or labels. return strings.Replace(prefix+name, "-", "_", -1) } - -// Best effort metric registration with Prometheus. 
-func RegisterMetric(metric prometheus.Collector) { - if err := prometheus.Register(metric); err != nil { - // Ignore AlreadyRegisteredError. - if _, ok := err.(prometheus.AlreadyRegisteredError); ok { - return - } - glog.Errorf("failed to register metric: %v", err) - } -} - -// MetricConfig is a container of configuration properties for the collection and exporting of -// application metrics to Prometheus. -type MetricConfig struct { - MetricsEndpoint string - MetricsPort string - MetricsPrefix string - MetricsLabels []string - MetricsJobStartLatencyBuckets []float64 -} - -// A variant of Prometheus Gauge that only holds non-negative values. -type PositiveGauge struct { - mux sync.RWMutex - name string - gaugeMetric *prometheus.GaugeVec -} - -func NewPositiveGauge(name string, description string, labels []string) *PositiveGauge { - validLabels := make([]string, len(labels)) - for i, label := range labels { - validLabels[i] = CreateValidMetricNameLabel("", label) - } - - gauge := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: name, - Help: description, - }, - validLabels, - ) - - return &PositiveGauge{ - gaugeMetric: gauge, - name: name, - } -} - -func fetchGaugeValue(m *prometheus.GaugeVec, labels map[string]string) float64 { - // Hack to get the current value of the metric to support PositiveGauge - pb := &prometheusmodel.Metric{} - - m.With(labels).Write(pb) - return pb.GetGauge().GetValue() -} - -func (p *PositiveGauge) Register() { - RegisterMetric(p.gaugeMetric) -} - -func (p *PositiveGauge) Value(labelMap map[string]string) float64 { - p.mux.RLock() - defer p.mux.RUnlock() - return fetchGaugeValue(p.gaugeMetric, labelMap) -} - -// Increment the Metric for the labels specified -func (p *PositiveGauge) Inc(labelMap map[string]string) { - p.mux.Lock() - defer p.mux.Unlock() - - if m, err := p.gaugeMetric.GetMetricWith(labelMap); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - glog.V(2).Infof("Incrementing %s with labels %s", p.name, labelMap) - m.Inc() - } -} - -// Decrement the metric only if its positive for the labels specified -func (p *PositiveGauge) Dec(labelMap map[string]string) { - p.mux.Lock() - defer p.mux.Unlock() - - // Decrement only if positive - val := fetchGaugeValue(p.gaugeMetric, labelMap) - if val > 0 { - glog.V(2).Infof("Decrementing %s with labels %s metricVal to %v", p.name, labelMap, val-1) - if m, err := p.gaugeMetric.GetMetricWith(labelMap); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Dec() - } - } -} - -type WorkQueueMetrics struct { - prefix string -} - -func InitializeMetrics(metricsConfig *MetricConfig) { - // Start the metrics endpoint for Prometheus to scrape - http.Handle(metricsConfig.MetricsEndpoint, promhttp.Handler()) - go http.ListenAndServe(fmt.Sprintf(":%s", metricsConfig.MetricsPort), nil) - glog.Infof("Started Metrics server at localhost:%s%s", metricsConfig.MetricsPort, metricsConfig.MetricsEndpoint) - - workQueueMetrics := WorkQueueMetrics{prefix: metricsConfig.MetricsPrefix} - workqueue.SetProvider(&workQueueMetrics) -} - -// Depth Metric for the kubernetes workqueue. -func (p *WorkQueueMetrics) NewDepthMetric(name string) workqueue.GaugeMetric { - depthMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: CreateValidMetricNameLabel(p.prefix, name+"_depth"), - Help: fmt.Sprintf("Current depth of workqueue: %s", name), - }, - ) - RegisterMetric(depthMetric) - return depthMetric -} - -// Adds Count Metrics for the kubernetes workqueue. 
-func (p *WorkQueueMetrics) NewAddsMetric(name string) workqueue.CounterMetric { - addsMetric := prometheus.NewCounter(prometheus.CounterOpts{ - Name: CreateValidMetricNameLabel(p.prefix, name+"_adds"), - Help: fmt.Sprintf("Total number of adds handled by workqueue: %s", name), - }) - RegisterMetric(addsMetric) - return addsMetric -} - -// Latency Metric for the kubernetes workqueue. -func (p *WorkQueueMetrics) NewLatencyMetric(name string) workqueue.HistogramMetric { - latencyMetric := prometheus.NewSummary(prometheus.SummaryOpts{ - Name: CreateValidMetricNameLabel(p.prefix, name+"_latency"), - Help: fmt.Sprintf("Latency for workqueue: %s", name), - }) - RegisterMetric(latencyMetric) - return latencyMetric -} - -// WorkDuration Metric for the kubernetes workqueue. -func (p *WorkQueueMetrics) NewWorkDurationMetric(name string) workqueue.HistogramMetric { - workDurationMetric := prometheus.NewSummary(prometheus.SummaryOpts{ - Name: CreateValidMetricNameLabel(p.prefix, name+"_work_duration"), - Help: fmt.Sprintf("How long processing an item from workqueue %s takes.", name), - }) - RegisterMetric(workDurationMetric) - return workDurationMetric -} - -// Retry Metric for the kubernetes workqueue. -func (p *WorkQueueMetrics) NewRetriesMetric(name string) workqueue.CounterMetric { - retriesMetrics := prometheus.NewCounter(prometheus.CounterOpts{ - Name: CreateValidMetricNameLabel(p.prefix, name+"_retries"), - Help: fmt.Sprintf("Total number of retries handled by workqueue: %s", name), - }) - RegisterMetric(retriesMetrics) - return retriesMetrics -} - -func (p *WorkQueueMetrics) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { - unfinishedWorkSecondsMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: CreateValidMetricNameLabel(p.prefix, name+"_unfinished_work_seconds"), - Help: fmt.Sprintf("Unfinished work seconds: %s", name), - }, - ) - RegisterMetric(unfinishedWorkSecondsMetric) - return unfinishedWorkSecondsMetric -} - -func (p *WorkQueueMetrics) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { - longestRunningProcessorMicrosecondsMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: CreateValidMetricNameLabel(p.prefix, name+"_longest_running_processor_microseconds"), - Help: fmt.Sprintf("Longest running processor microseconds: %s", name), - }, - ) - RegisterMetric(longestRunningProcessorMicrosecondsMetric) - return longestRunningProcessorMicrosecondsMetric -} diff --git a/pkg/util/metrics_test.go b/pkg/util/metrics_test.go deleted file mode 100644 index 4771a0858..000000000 --- a/pkg/util/metrics_test.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPositiveGauge_EmptyLabels(t *testing.T) { - gauge := NewPositiveGauge("testGauge", "test-description", []string{}) - emptyMap := map[string]string{} - gauge.Dec(emptyMap) - assert.Equal(t, fetchGaugeValue(gauge.gaugeMetric, emptyMap), float64(0)) - - gauge.Inc(emptyMap) - assert.Equal(t, fetchGaugeValue(gauge.gaugeMetric, emptyMap), float64(1)) - gauge.Dec(map[string]string{}) - assert.Equal(t, fetchGaugeValue(gauge.gaugeMetric, emptyMap), float64(0)) -} - -func TestPositiveGauge_WithLabels(t *testing.T) { - gauge := NewPositiveGauge("testGauge1", "test-description-1", []string{"app_id"}) - app1 := map[string]string{"app_id": "test1"} - app2 := map[string]string{"app_id": "test2"} - - var wg sync.WaitGroup - wg.Add(2) - go func() { - for i := 0; i < 10; i++ { - gauge.Inc(app1) - } - for i := 0; i < 5; i++ { - gauge.Dec(app1) - } - wg.Done() - }() - go func() { - for i := 0; i < 5; i++ { - gauge.Inc(app2) - } - for i := 0; i < 10; i++ { - gauge.Dec(app2) - } - wg.Done() - }() - - wg.Wait() - assert.Equal(t, float64(5), fetchGaugeValue(gauge.gaugeMetric, app1)) - // Always Positive Gauge. - assert.Equal(t, float64(0), fetchGaugeValue(gauge.gaugeMetric, app2)) -} diff --git a/pkg/webhook/scheme.go b/pkg/util/resourcequota.go similarity index 57% rename from pkg/webhook/scheme.go rename to pkg/util/resourcequota.go index e9a02c5f0..1b79eea23 100644 --- a/pkg/webhook/scheme.go +++ b/pkg/util/resourcequota.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,25 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -package webhook +package util import ( - admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" ) -var ( - scheme = runtime.NewScheme() - codecs = serializer.NewCodecFactory(scheme) -) - -func init() { - addToScheme(scheme) -} - -func addToScheme(scheme *runtime.Scheme) { - corev1.AddToScheme(scheme) - admissionv1.AddToScheme(scheme) +// SumResourceList sums the resource list. +func SumResourceList(lists []corev1.ResourceList) corev1.ResourceList { + total := corev1.ResourceList{} + for _, list := range lists { + for name, quantity := range list { + if value, ok := total[name]; !ok { + total[name] = quantity.DeepCopy() + } else { + value.Add(quantity) + total[name] = value + } + } + } + return total } diff --git a/pkg/util/sparkapplication.go b/pkg/util/sparkapplication.go new file mode 100644 index 000000000..273ad7401 --- /dev/null +++ b/pkg/util/sparkapplication.go @@ -0,0 +1,430 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + "reflect" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" +) + +// GetDriverPodName returns name of the driver pod of the given spark application. +func GetDriverPodName(app *v1beta2.SparkApplication) string { + name := app.Spec.Driver.PodName + if name != nil && len(*name) > 0 { + return *name + } + + sparkConf := app.Spec.SparkConf + if sparkConf[common.SparkKubernetesDriverPodName] != "" { + return sparkConf[common.SparkKubernetesDriverPodName] + } + + return fmt.Sprintf("%s-driver", app.Name) +} + +// GetApplicationState returns the state of the given SparkApplication. +func GetApplicationState(app *v1beta2.SparkApplication) v1beta2.ApplicationStateType { + return app.Status.AppState.State +} + +// IsExpired returns whether the given SparkApplication is expired. +func IsExpired(app *v1beta2.SparkApplication) bool { + // The application has no TTL defined and will never expire. + if app.Spec.TimeToLiveSeconds == nil { + return false + } + + ttl := time.Duration(*app.Spec.TimeToLiveSeconds) * time.Second + now := time.Now() + if !app.Status.TerminationTime.IsZero() && now.Sub(app.Status.TerminationTime.Time) > ttl { + return true + } + + return false +} + +// IsDriverRunning returns whether the driver pod of the given SparkApplication is running. +func IsDriverRunning(app *v1beta2.SparkApplication) bool { + return app.Status.AppState.State == v1beta2.ApplicationStateRunning +} + +func ShouldRetry(app *v1beta2.SparkApplication) bool { + switch app.Status.AppState.State { + case v1beta2.ApplicationStateSucceeding: + return app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways + case v1beta2.ApplicationStateFailing: + if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways { + return true + } else if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyOnFailure { + // We retry if we haven't hit the retry limit. + if app.Spec.RestartPolicy.OnFailureRetries != nil && app.Status.ExecutionAttempts <= *app.Spec.RestartPolicy.OnFailureRetries { + return true + } + } + case v1beta2.ApplicationStateFailedSubmission: + if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways { + return true + } else if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyOnFailure { + // We retry if we haven't hit the retry limit. 
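+			// (Editorial note) Assuming SubmissionAttempts counts the submissions
+			// made so far, the "<=" comparison permits up to N+1 submissions in
+			// total when OnSubmissionFailureRetries is N; ExecutionAttempts in the
+			// failing case above follows the same convention.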
+ if app.Spec.RestartPolicy.OnSubmissionFailureRetries != nil && app.Status.SubmissionAttempts <= *app.Spec.RestartPolicy.OnSubmissionFailureRetries { + return true + } + } + } + return false +} + +func GetLocalVolumes(app *v1beta2.SparkApplication) map[string]corev1.Volume { + volumes := make(map[string]corev1.Volume) + for _, volume := range app.Spec.Volumes { + if strings.HasPrefix(volume.Name, common.SparkLocalDirVolumePrefix) { + volumes[volume.Name] = volume + } + } + return volumes +} + +func GetDriverLocalVolumeMounts(app *v1beta2.SparkApplication) []corev1.VolumeMount { + volumeMounts := []corev1.VolumeMount{} + for _, volumeMount := range app.Spec.Driver.VolumeMounts { + if strings.HasPrefix(volumeMount.Name, common.SparkLocalDirVolumePrefix) { + volumeMounts = append(volumeMounts, volumeMount) + } + } + return volumeMounts +} + +func GetExecutorLocalVolumeMounts(app *v1beta2.SparkApplication) []corev1.VolumeMount { + volumeMounts := []corev1.VolumeMount{} + for _, volumeMount := range app.Spec.Executor.VolumeMounts { + if strings.HasPrefix(volumeMount.Name, common.SparkLocalDirVolumePrefix) { + volumeMounts = append(volumeMounts, volumeMount) + } + } + return volumeMounts +} + +func GetDefaultUIServiceName(app *v1beta2.SparkApplication) string { + return fmt.Sprintf("%s-ui-svc", app.Name) +} + +func GetDefaultUIIngressName(app *v1beta2.SparkApplication) string { + return fmt.Sprintf("%s-ui-ingress", app.Name) +} + +func GetResourceLabels(app *v1beta2.SparkApplication) map[string]string { + labels := map[string]string{ + common.LabelSparkAppName: app.Name, + } + if app.Status.SubmissionID != "" { + labels[common.LabelSubmissionID] = app.Status.SubmissionID + } + return labels +} + +func GetWebUIServiceLabels(app *v1beta2.SparkApplication) map[string]string { + labels := map[string]string{} + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceLabels != nil { + for key, value := range app.Spec.SparkUIOptions.ServiceLabels { + labels[key] = value + } + } + return labels +} + +func GetWebUIServiceAnnotations(app *v1beta2.SparkApplication) map[string]string { + serviceAnnotations := map[string]string{} + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceAnnotations != nil { + for key, value := range app.Spec.SparkUIOptions.ServiceAnnotations { + serviceAnnotations[key] = value + } + } + return serviceAnnotations +} + +func GetWebUIServiceType(app *v1beta2.SparkApplication) corev1.ServiceType { + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceType != nil { + return *app.Spec.SparkUIOptions.ServiceType + } + return corev1.ServiceTypeClusterIP +} + +func GetWebUIIngressAnnotations(app *v1beta2.SparkApplication) map[string]string { + annotations := map[string]string{} + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressAnnotations != nil { + for key, value := range app.Spec.SparkUIOptions.IngressAnnotations { + annotations[key] = value + } + } + return annotations +} + +func GetWebUIIngressTLS(app *v1beta2.SparkApplication) []networkingv1.IngressTLS { + ingressTLSs := []networkingv1.IngressTLS{} + if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressTLS != nil { + ingressTLSs = append(ingressTLSs, app.Spec.SparkUIOptions.IngressTLS...) + } + return ingressTLSs +} + +// GetPrometheusConfigMapName returns the name of the ConfigMap for Prometheus configuration. 
+func GetPrometheusConfigMapName(app *v1beta2.SparkApplication) string {
+	return fmt.Sprintf("%s-%s", app.Name, common.PrometheusConfigMapNameSuffix)
+}
+
+// PrometheusMonitoringEnabled returns if Prometheus monitoring is enabled or not.
+func PrometheusMonitoringEnabled(app *v1beta2.SparkApplication) bool {
+	return app.Spec.Monitoring != nil && app.Spec.Monitoring.Prometheus != nil
+}
+
+// HasPrometheusConfigFile returns if Prometheus monitoring uses a configuration file in the container.
+func HasPrometheusConfigFile(app *v1beta2.SparkApplication) bool {
+	return PrometheusMonitoringEnabled(app) &&
+		app.Spec.Monitoring.Prometheus.ConfigFile != nil &&
+		*app.Spec.Monitoring.Prometheus.ConfigFile != ""
+}
+
+// HasMetricsProperties returns whether Prometheus monitoring defines metricsProperties in the spec.
+func HasMetricsProperties(app *v1beta2.SparkApplication) bool {
+	return PrometheusMonitoringEnabled(app) &&
+		app.Spec.Monitoring.MetricsProperties != nil &&
+		*app.Spec.Monitoring.MetricsProperties != ""
+}
+
+// HasMetricsPropertiesFile returns whether Monitoring defines metricsPropertiesFile in the spec.
+func HasMetricsPropertiesFile(app *v1beta2.SparkApplication) bool {
+	return PrometheusMonitoringEnabled(app) &&
+		app.Spec.Monitoring.MetricsPropertiesFile != nil &&
+		*app.Spec.Monitoring.MetricsPropertiesFile != ""
+}
+
+// ExposeDriverMetrics returns if driver metrics should be exposed.
+func ExposeDriverMetrics(app *v1beta2.SparkApplication) bool {
+	return app.Spec.Monitoring != nil && app.Spec.Monitoring.ExposeDriverMetrics
+}
+
+// ExposeExecutorMetrics returns if executor metrics should be exposed.
+func ExposeExecutorMetrics(app *v1beta2.SparkApplication) bool {
+	return app.Spec.Monitoring != nil && app.Spec.Monitoring.ExposeExecutorMetrics
+}
+
+// GetOwnerReference returns an OwnerReference pointing to the given app.
+func GetOwnerReference(app *v1beta2.SparkApplication) metav1.OwnerReference {
+	return metav1.OwnerReference{
+		APIVersion:         v1beta2.SchemeGroupVersion.String(),
+		Kind:               reflect.TypeOf(v1beta2.SparkApplication{}).Name(),
+		Name:               app.Name,
+		UID:                app.UID,
+		Controller:         BoolPtr(true),
+		BlockOwnerDeletion: BoolPtr(true),
+	}
+}
+
+// GetDriverState returns the driver state from the given driver pod.
+func GetDriverState(pod *corev1.Pod) v1beta2.DriverState {
+	switch pod.Status.Phase {
+	case corev1.PodPending:
+		return v1beta2.DriverStatePending
+	case corev1.PodRunning:
+		state := GetDriverContainerTerminatedState(pod)
+		if state != nil {
+			if state.ExitCode == 0 {
+				return v1beta2.DriverStateCompleted
+			}
+			return v1beta2.DriverStateFailed
+		}
+		return v1beta2.DriverStateRunning
+	case corev1.PodSucceeded:
+		return v1beta2.DriverStateCompleted
+	case corev1.PodFailed:
+		state := GetDriverContainerTerminatedState(pod)
+		if state != nil && state.ExitCode == 0 {
+			return v1beta2.DriverStateCompleted
+		}
+		return v1beta2.DriverStateFailed
+	default:
+		return v1beta2.DriverStateUnknown
+	}
+}
+
+// GetExecutorState returns the executor state from the given executor pod.
+func GetExecutorState(pod *corev1.Pod) v1beta2.ExecutorState {
+	switch pod.Status.Phase {
+	case corev1.PodPending:
+		return v1beta2.ExecutorStatePending
+	case corev1.PodRunning:
+		return v1beta2.ExecutorStateRunning
+	case corev1.PodSucceeded:
+		return v1beta2.ExecutorStateCompleted
+	case corev1.PodFailed:
+		return v1beta2.ExecutorStateFailed
+	default:
+		return v1beta2.ExecutorStateUnknown
+	}
+}
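+
[Editor's note: usage sketch, not part of this patch. GetDriverState above and
DriverStateToApplicationState below are the two halves of the pod-to-application
state mapping; a minimal sketch of how they compose during reconciliation, with
a hypothetical function name:

	// deriveAppState maps a driver pod's phase to the application-level state.
	func deriveAppState(driverPod *corev1.Pod) v1beta2.ApplicationStateType {
		return DriverStateToApplicationState(GetDriverState(driverPod))
	}

End of editor's note.]
+// GetDriverContainerTerminatedState returns the terminated state of the driver container.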
+func GetDriverContainerTerminatedState(pod *corev1.Pod) *corev1.ContainerStateTerminated {
+	return GetContainerTerminatedState(pod, common.SparkDriverContainerName)
+}
+
+// GetExecutorContainerTerminatedState returns the terminated state of the executor container.
+func GetExecutorContainerTerminatedState(pod *corev1.Pod) *corev1.ContainerStateTerminated {
+	state := GetContainerTerminatedState(pod, common.Spark3DefaultExecutorContainerName)
+	if state == nil {
+		state = GetContainerTerminatedState(pod, common.SparkExecutorContainerName)
+	}
+	return state
+}
+
+// GetContainerTerminatedState returns the terminated state of the container.
+func GetContainerTerminatedState(pod *corev1.Pod, container string) *corev1.ContainerStateTerminated {
+	for _, c := range pod.Status.ContainerStatuses {
+		if c.Name == container {
+			if c.State.Terminated != nil {
+				return c.State.Terminated
+			}
+			return nil
+		}
+	}
+	return nil
+}
+
+// IsDriverTerminated returns whether the driver state is a terminated state.
+func IsDriverTerminated(driverState v1beta2.DriverState) bool {
+	return driverState == v1beta2.DriverStateCompleted || driverState == v1beta2.DriverStateFailed
+}
+
+// IsExecutorTerminated returns whether the executor state is a terminated state.
+func IsExecutorTerminated(executorState v1beta2.ExecutorState) bool {
+	return executorState == v1beta2.ExecutorStateCompleted || executorState == v1beta2.ExecutorStateFailed
+}
+
+// DriverStateToApplicationState converts driver state to application state.
+func DriverStateToApplicationState(driverState v1beta2.DriverState) v1beta2.ApplicationStateType {
+	switch driverState {
+	case v1beta2.DriverStatePending:
+		return v1beta2.ApplicationStateSubmitted
+	case v1beta2.DriverStateRunning:
+		return v1beta2.ApplicationStateRunning
+	case v1beta2.DriverStateCompleted:
+		return v1beta2.ApplicationStateSucceeding
+	case v1beta2.DriverStateFailed:
+		return v1beta2.ApplicationStateFailing
+	default:
+		return v1beta2.ApplicationStateUnknown
+	}
+}
+
+// GetDriverRequestResource returns the driver request resource list.
+func GetDriverRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList {
+	minResource := corev1.ResourceList{}
+
+	// Cores corresponds to the driver's core request.
+	if app.Spec.Driver.Cores != nil {
+		if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Driver.Cores)); err == nil {
+			minResource[corev1.ResourceCPU] = value
+		}
+	}
+
+	// CoreLimit corresponds to the driver's core limit; it is used only when the core request is empty.
+	if app.Spec.Driver.CoreLimit != nil {
+		if _, ok := minResource[corev1.ResourceCPU]; !ok {
+			if value, err := resource.ParseQuantity(*app.Spec.Driver.CoreLimit); err == nil {
+				minResource[corev1.ResourceCPU] = value
+			}
+		}
+	}
+
+	// Memory + MemoryOverhead correspond to the driver's memory request.
+	if app.Spec.Driver.Memory != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Driver.Memory); err == nil {
+			minResource[corev1.ResourceMemory] = value
+		}
+	}
+	if app.Spec.Driver.MemoryOverhead != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Driver.MemoryOverhead); err == nil {
+			if existing, ok := minResource[corev1.ResourceMemory]; ok {
+				existing.Add(value)
+				minResource[corev1.ResourceMemory] = existing
+			}
+		}
+	}
+
+	return minResource
+}
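+
[Editor's note: usage sketch, not part of this patch. GetDriverRequestResource
above, GetExecutorRequestResource below, and SumResourceList from
pkg/util/resourcequota.go combine into one minimum-resource figure per
application; a minimal sketch, with a hypothetical function name:

	// minAppResources estimates the total resources a SparkApplication will
	// request, e.g. for comparison against a namespace ResourceQuota.
	func minAppResources(app *v1beta2.SparkApplication) corev1.ResourceList {
		return SumResourceList([]corev1.ResourceList{
			GetDriverRequestResource(app),
			GetExecutorRequestResource(app),
		})
	}

End of editor's note.]
+// GetExecutorRequestResource returns the executor request resource list.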
+func GetExecutorRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList {
+	minResource := corev1.ResourceList{}
+
+	// CoreRequest corresponds to the executor's core request.
+	if app.Spec.Executor.CoreRequest != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreRequest); err == nil {
+			minResource[corev1.ResourceCPU] = value
+		}
+	}
+
+	// Use the Cores attribute if CoreRequest is empty.
+	if app.Spec.Executor.Cores != nil {
+		if _, ok := minResource[corev1.ResourceCPU]; !ok {
+			if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Executor.Cores)); err == nil {
+				minResource[corev1.ResourceCPU] = value
+			}
+		}
+	}
+
+	// CoreLimit corresponds to the executor's core limit; it is used only when the core request is empty.
+	if app.Spec.Executor.CoreLimit != nil {
+		if _, ok := minResource[corev1.ResourceCPU]; !ok {
+			if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreLimit); err == nil {
+				minResource[corev1.ResourceCPU] = value
+			}
+		}
+	}
+
+	// Memory + MemoryOverhead correspond to the executor's memory request.
+	if app.Spec.Executor.Memory != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Executor.Memory); err == nil {
+			minResource[corev1.ResourceMemory] = value
+		}
+	}
+	if app.Spec.Executor.MemoryOverhead != nil {
+		if value, err := resource.ParseQuantity(*app.Spec.Executor.MemoryOverhead); err == nil {
+			if existing, ok := minResource[corev1.ResourceMemory]; ok {
+				existing.Add(value)
+				minResource[corev1.ResourceMemory] = existing
+			}
+		}
+	}
+
+	// Default to a single executor instance if Instances is unset, to avoid a
+	// nil dereference (editorial fix; the defaulting logic normally sets this).
+	instances := int32(1)
+	if app.Spec.Executor.Instances != nil {
+		instances = *app.Spec.Executor.Instances
+	}
+	resourceList := []corev1.ResourceList{{}}
+	for i := int32(0); i < instances; i++ {
+		resourceList = append(resourceList, minResource)
+	}
+	return SumResourceList(resourceList)
+}
diff --git a/pkg/util/sparkapplication_test.go b/pkg/util/sparkapplication_test.go
new file mode 100644
index 000000000..7f0ab4a46
--- /dev/null
+++ b/pkg/util/sparkapplication_test.go
@@ -0,0 +1,330 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util_test
+
+import (
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var _ = Describe("GetDriverPodName", func() { + Context("SparkApplication without driver pod name field and driver pod name conf", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return the default driver pod name", func() { + Expect(util.GetDriverPodName(app)).To(Equal("test-app-driver")) + }) + }) + + Context("SparkApplication with only driver pod name field", func() { + driverPodName := "test-app-driver-pod" + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + PodName: &driverPodName, + }, + }, + } + + It("Should return the driver pod name from driver spec", func() { + Expect(util.GetDriverPodName(app)).To(Equal(driverPodName)) + }) + }) + + Context("SparkApplication with only driver pod name conf", func() { + driverPodName := "test-app-driver-pod" + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + SparkConf: map[string]string{ + common.SparkKubernetesDriverPodName: driverPodName, + }, + }, + } + + It("Should return the driver name from spark conf", func() { + Expect(util.GetDriverPodName(app)).To(Equal(driverPodName)) + }) + }) + + Context("SparkApplication with both driver pod name field and driver pod name conf", func() { + driverPodName1 := "test-app-driver-1" + driverPodName2 := "test-app-driver-2" + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + SparkConf: map[string]string{ + common.SparkKubernetesDriverPodName: driverPodName1, + }, + Driver: v1beta2.DriverSpec{ + PodName: &driverPodName2, + }, + }, + } + + It("Should return the driver pod name from driver spec", func() { + Expect(util.GetDriverPodName(app)).To(Equal(driverPodName2)) + }) + }) +}) + +var _ = Describe("GetApplicationState", func() { + Context("SparkApplication with completed state", func() { + app := &v1beta2.SparkApplication{ + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.ApplicationStateCompleted, + }, + }, + } + + It("Should return completed state", func() { + Expect(util.GetApplicationState(app)).To(Equal(v1beta2.ApplicationStateCompleted)) + }) + }) +}) + +var _ = Describe("IsExpired", func() { + Context("SparkApplication without TTL", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return false", func() { + Expect(util.IsExpired(app)).To(BeFalse()) + }) + }) + + Context("SparkApplication not terminated with TTL", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + TimeToLiveSeconds: util.Int64Ptr(3600), + }, + } + + It("Should return false", func() { + Expect(util.IsExpired(app)).To(BeFalse()) + }) + }) + + Context("SparkApplication terminated with TTL not expired", func() { + now := time.Now() + app := 
&v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + TimeToLiveSeconds: util.Int64Ptr(3600), + }, + Status: v1beta2.SparkApplicationStatus{ + TerminationTime: metav1.NewTime(now.Add(-30 * time.Minute)), + }, + } + + It("Should return false", func() { + Expect(util.IsExpired(app)).To(BeFalse()) + }) + }) + + Context("SparkApplication terminated with TTL expired", func() { + now := time.Now() + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + TimeToLiveSeconds: util.Int64Ptr(3600), + }, + Status: v1beta2.SparkApplicationStatus{ + TerminationTime: metav1.NewTime(now.Add(-2 * time.Hour)), + }, + } + + It("Should return true", func() { + Expect(util.IsExpired(app)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("IsDriverRunning", func() { + Context("SparkApplication with completed state", func() { + app := &v1beta2.SparkApplication{ + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.ApplicationStateCompleted, + }, + }, + } + + It("Should return false", func() { + Expect(util.IsDriverRunning(app)).To(BeFalse()) + }) + }) + + Context("SparkApplication with running state", func() { + app := &v1beta2.SparkApplication{ + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.ApplicationStateRunning, + }, + }, + } + + It("Should return true", func() { + Expect(util.IsDriverRunning(app)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("GetLocalVolumes", func() { + Context("SparkApplication with local volumes", func() { + volume1 := corev1.Volume{ + Name: "local-volume", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/tmp", + }, + }, + } + + volume2 := corev1.Volume{ + Name: "spark-local-dir-1", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/mnt/spark-local-dir-1", + }, + }, + } + + volume3 := corev1.Volume{ + Name: "spark-local-dir-2", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/mnt/spark-local-dir-2", + }, + }, + } + + app := &v1beta2.SparkApplication{ + Spec: v1beta2.SparkApplicationSpec{ + Volumes: []corev1.Volume{ + volume1, + volume2, + volume3, + }, + }, + } + + It("Should return volumes with the correct prefix", func() { + volumes := util.GetLocalVolumes(app) + expected := map[string]corev1.Volume{ + volume2.Name: volume2, + volume3.Name: volume3, + } + Expect(volumes).To(Equal(expected)) + }) + }) +}) + +var _ = Describe("GetDefaultUIServiceName", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return the default UI service name", func() { + Expect(util.GetDefaultUIServiceName(app)).To(Equal("test-app-ui-svc")) + }) +}) + +var _ = Describe("GetDefaultUIIngressName", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return the default UI ingress name", func() { + Expect(util.GetDefaultUIIngressName(app)).To(Equal("test-app-ui-ingress")) + }) +}) + +var _ = Describe("IsDriverTerminated", func() { + It("Should check whether driver is terminated", func() { + Expect(util.IsDriverTerminated(v1beta2.DriverStatePending)).To(BeFalse()) + 
Expect(util.IsDriverTerminated(v1beta2.DriverStateRunning)).To(BeFalse())
+		Expect(util.IsDriverTerminated(v1beta2.DriverStateCompleted)).To(BeTrue())
+		Expect(util.IsDriverTerminated(v1beta2.DriverStateFailed)).To(BeTrue())
+		Expect(util.IsDriverTerminated(v1beta2.DriverStateUnknown)).To(BeFalse())
+	})
+})
+
+var _ = Describe("IsExecutorTerminated", func() {
+	It("Should check whether executor is terminated", func() {
+		Expect(util.IsExecutorTerminated(v1beta2.ExecutorStatePending)).To(BeFalse())
+		Expect(util.IsExecutorTerminated(v1beta2.ExecutorStateRunning)).To(BeFalse())
+		Expect(util.IsExecutorTerminated(v1beta2.ExecutorStateCompleted)).To(BeTrue())
+		Expect(util.IsExecutorTerminated(v1beta2.ExecutorStateFailed)).To(BeTrue())
+		Expect(util.IsExecutorTerminated(v1beta2.ExecutorStateUnknown)).To(BeFalse())
+	})
+})
+
+var _ = Describe("DriverStateToApplicationState", func() {
+	It("Should convert driver state to application state correctly", func() {
+		Expect(util.DriverStateToApplicationState(v1beta2.DriverStatePending)).To(Equal(v1beta2.ApplicationStateSubmitted))
+		Expect(util.DriverStateToApplicationState(v1beta2.DriverStateRunning)).To(Equal(v1beta2.ApplicationStateRunning))
+		Expect(util.DriverStateToApplicationState(v1beta2.DriverStateCompleted)).To(Equal(v1beta2.ApplicationStateSucceeding))
+		Expect(util.DriverStateToApplicationState(v1beta2.DriverStateFailed)).To(Equal(v1beta2.ApplicationStateFailing))
+		Expect(util.DriverStateToApplicationState(v1beta2.DriverStateUnknown)).To(Equal(v1beta2.ApplicationStateUnknown))
+	})
+})
diff --git a/pkg/util/sparkpod.go b/pkg/util/sparkpod.go
new file mode 100644
index 000000000..137fb0d5b
--- /dev/null
+++ b/pkg/util/sparkpod.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	corev1 "k8s.io/api/core/v1"
+
+	"github.com/kubeflow/spark-operator/pkg/common"
+)
+
+// IsLaunchedBySparkOperator returns whether the given pod is launched by the Spark Operator.
+func IsLaunchedBySparkOperator(pod *corev1.Pod) bool {
+	return pod.Labels[common.LabelLaunchedBySparkOperator] == "true"
+}
+
+// IsDriverPod returns whether the given pod is a Spark driver Pod.
+func IsDriverPod(pod *corev1.Pod) bool {
+	return pod.Labels[common.LabelSparkRole] == common.SparkRoleDriver
+}
+
+// IsExecutorPod returns whether the given pod is a Spark executor Pod.
+func IsExecutorPod(pod *corev1.Pod) bool {
+	return pod.Labels[common.LabelSparkRole] == common.SparkRoleExecutor
+}
+
+// GetAppName returns the Spark application name from the pod's labels.
+func GetAppName(pod *corev1.Pod) string {
+	return pod.Labels[common.LabelSparkAppName]
+}
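+
[Editor's note: usage sketch, not part of this patch. A minimal sketch of how
the label helpers in this file combine when filtering pod events, with a
hypothetical function name:

	// ownedSparkPodKey returns the owning application's name for pods managed
	// by the operator, and false for all other pods.
	func ownedSparkPodKey(pod *corev1.Pod) (string, bool) {
		if !IsLaunchedBySparkOperator(pod) || (!IsDriverPod(pod) && !IsExecutorPod(pod)) {
			return "", false
		}
		return GetAppName(pod), true
	}

End of editor's note.]
+// GetSparkApplicationID returns the Spark application ID from the pod's labels.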
+func GetSparkApplicationID(pod *corev1.Pod) string { + return pod.Labels[common.LabelSparkApplicationSelector] +} diff --git a/pkg/util/sparkpod_test.go b/pkg/util/sparkpod_test.go new file mode 100644 index 000000000..a138f6795 --- /dev/null +++ b/pkg/util/sparkpod_test.go @@ -0,0 +1,301 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var _ = Describe("IsLaunchedBySparkOperator", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return false", func() { + Expect(util.IsLaunchedBySparkOperator(pod)).To(BeFalse()) + }) + }) + + Context("Pod without launched by spark operator label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return false", func() { + Expect(util.IsLaunchedBySparkOperator(pod)).To(BeFalse()) + }) + }) + + Context("Pod with launched by spark operator label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + common.LabelLaunchedBySparkOperator: "true", + }, + }, + } + + It("Should return true", func() { + Expect(util.IsLaunchedBySparkOperator(pod)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("IsDriverPod", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return false", func() { + Expect(util.IsDriverPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod without spark role label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return false", func() { + Expect(util.IsDriverPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod with spark role label not equal to driver", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + common.LabelSparkRole: common.SparkRoleExecutor, + }, + }, + } + + It("Should return false", func() { + Expect(util.IsDriverPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod with spark role label equal to driver", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + 
common.LabelSparkRole: common.SparkRoleDriver, + }, + }, + } + + It("Should return true", func() { + Expect(util.IsDriverPod(pod)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("IsExecutorPod", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return false", func() { + Expect(util.IsExecutorPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod without spark role label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return false", func() { + Expect(util.IsExecutorPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod with spark role label not equal to executor", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + common.LabelSparkRole: common.SparkRoleDriver, + }, + }, + } + + It("Should return false", func() { + Expect(util.IsExecutorPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod with spark role label equal to executor", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + common.LabelSparkRole: common.SparkRoleExecutor, + }, + }, + } + + It("Should return true", func() { + Expect(util.IsExecutorPod(pod)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("GetAppName", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return empty application name", func() { + Expect(util.GetAppName(pod)).To(BeEmpty()) + }) + }) + + Context("Pod without app name label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelScheduledSparkAppName: "true", + }, + }, + } + + It("Should return empty application name", func() { + Expect(util.GetAppName(pod)).To(BeEmpty()) + }) + }) + + Context("Pod with app name label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return the application name", func() { + Expect(util.GetAppName(pod)).To(Equal("test-app")) + }) + }) +}) + +var _ = Describe("GetSparkApplicationID", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return empty application ID", func() { + Expect(util.GetSparkApplicationID(pod)).To(BeEmpty()) + }) + }) + + Context("Pod without spark app selector label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return empty application ID", func() { + Expect(util.GetSparkApplicationID(pod)).To(BeEmpty()) + }) + }) + + Context("Pod with spark app selector label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: 
"test-app", + common.LabelSparkApplicationSelector: "test-app-id", + }, + }, + } + + It("Should return the application ID", func() { + Expect(util.GetSparkApplicationID(pod)).To(Equal("test-app-id")) + }) + }) +}) diff --git a/pkg/util/suite_test.go b/pkg/util/suite_test.go new file mode 100644 index 000000000..e442df146 --- /dev/null +++ b/pkg/util/suite_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestUtil(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Util Suite") +} + +var _ = BeforeSuite(func() { + log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/util/util.go b/pkg/util/util.go index d39e2b19b..850bc209d 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -17,45 +17,63 @@ limitations under the License. package util import ( - "hash" - "hash/fnv" - "reflect" + "fmt" + "os" + "strings" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/pkg/common" ) -// NewHash32 returns a 32-bit hash computed from the given byte slice. -func NewHash32() hash.Hash32 { - return fnv.New32() +func GetMasterURL() (string, error) { + kubernetesServiceHost := os.Getenv(common.EnvKubernetesServiceHost) + if kubernetesServiceHost == "" { + return "", fmt.Errorf("environment variable %s is not found", common.EnvKubernetesServiceHost) + } + + kubernetesServicePort := os.Getenv(common.EnvKubernetesServicePort) + if kubernetesServicePort == "" { + return "", fmt.Errorf("environment variable %s is not found", common.EnvKubernetesServicePort) + } + // check if the host is IPv6 address + if strings.Contains(kubernetesServiceHost, ":") && !strings.HasPrefix(kubernetesServiceHost, "[") { + return fmt.Sprintf("k8s://https://[%s]:%s", kubernetesServiceHost, kubernetesServicePort), nil + } + return fmt.Sprintf("k8s://https://%s:%s", kubernetesServiceHost, kubernetesServicePort), nil +} + +// Helper functions to check and remove a string from a slice of strings. +// ContainsString checks if a given string is present in a slice +func ContainsString(slice []string, s string) bool { + for _, item := range slice { + if item == s { + return true + } + } + return false } -// GetOwnerReference returns an OwnerReference pointing to the given app. 
-func GetOwnerReference(app *v1beta2.SparkApplication) metav1.OwnerReference { - controller := true - return metav1.OwnerReference{ - APIVersion: v1beta2.SchemeGroupVersion.String(), - Kind: reflect.TypeOf(v1beta2.SparkApplication{}).Name(), - Name: app.Name, - UID: app.UID, - Controller: &controller, +// RemoveString removes a given string from a slice, if present +func RemoveString(slice []string, s string) (result []string) { + for _, item := range slice { + if item != s { + result = append(result, item) + } } + return result +} + +func BoolPtr(b bool) *bool { + return &b } -// IsLaunchedBySparkOperator returns whether the given pod is launched by the Spark Operator. -func IsLaunchedBySparkOperator(pod *apiv1.Pod) bool { - return pod.Labels[config.LaunchedBySparkOperatorLabel] == "true" +func Int32Ptr(n int32) *int32 { + return &n } -// IsDriverPod returns whether the given pod is a Spark driver Pod. -func IsDriverPod(pod *apiv1.Pod) bool { - return pod.Labels[config.SparkRoleLabel] == config.SparkDriverRole +func Int64Ptr(n int64) *int64 { + return &n } -// IsExecutorPod returns whether the given pod is a Spark executor Pod. -func IsExecutorPod(pod *apiv1.Pod) bool { - return pod.Labels[config.SparkRoleLabel] == config.SparkExecutorRole +func StringPtr(s string) *string { + return &s } diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go new file mode 100644 index 000000000..324ed3580 --- /dev/null +++ b/pkg/util/util_test.go @@ -0,0 +1,131 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util_test + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var _ = Describe("GetMasterURL", func() { + BeforeEach(func() { + os.Setenv(common.EnvKubernetesServiceHost, "127.0.0.1") + os.Setenv(common.EnvKubernetesServicePort, "443") + }) + + AfterEach(func() { + os.Unsetenv(common.EnvKubernetesServiceHost) + os.Unsetenv(common.EnvKubernetesServicePort) + }) + + Context("IPv4 address", func() { + It("Should return correct master URL without error", func() { + masterURL, err := util.GetMasterURL() + Expect(masterURL).To(Equal("k8s://https://127.0.0.1:443")) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) + +var _ = Describe("GetMasterURL", func() { + BeforeEach(func() { + os.Setenv(common.EnvKubernetesServiceHost, "::1") + os.Setenv(common.EnvKubernetesServicePort, "443") + }) + + AfterEach(func() { + os.Unsetenv(common.EnvKubernetesServiceHost) + os.Unsetenv(common.EnvKubernetesServicePort) + }) + + Context("IPv6 address", func() { + It("Should return correct master URL without error", func() { + masterURL, err := util.GetMasterURL() + Expect(masterURL).To(Equal("k8s://https://[::1]:443")) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) + +var _ = Describe("ContainsString", func() { + slice := []string{"a", "b", "c"} + + Context("When the string is in the slice", func() { + It("Should return true", func() { + Expect(util.ContainsString(slice, "b")).To(BeTrue()) + }) + }) + + Context("When the string is not in the slice", func() { + It("Should return false", func() { + Expect(util.ContainsString(slice, "d")).To(BeFalse()) + }) + }) +}) + +var _ = Describe("RemoveString", func() { + Context("When the string is in the slice", func() { + slice := []string{"a", "b", "c"} + expected := []string{"a", "c"} + + It("Should remove the string", func() { + Expect(util.RemoveString(slice, "b")).To(Equal(expected)) + }) + }) + + Context("When the string is not in the slice", func() { + slice := []string{"a", "b", "c"} + expected := []string{"a", "b", "c"} + + It("Should do nothing", func() { + Expect(util.RemoveString(slice, "d")).To(Equal(expected)) + }) + }) +}) + +var _ = Describe("BoolPtr", func() { + It("Should return a pointer to the given bool value", func() { + b := true + Expect(util.BoolPtr(b)).To(Equal(&b)) + }) +}) + +var _ = Describe("Int32Ptr", func() { + It("Should return a pointer to the given int32 value", func() { + i := int32(42) + Expect(util.Int32Ptr(i)).To(Equal(&i)) + }) +}) + +var _ = Describe("Int64Ptr", func() { + It("Should return a pointer to the given int64 value", func() { + i := int64(42) + Expect(util.Int64Ptr(i)).To(Equal(&i)) + }) +}) + +var _ = Describe("StringPtr", func() { + It("Should return a pointer to the given string value", func() { + s := "hello" + Expect(util.StringPtr(s)).To(Equal(&s)) + }) +}) diff --git a/pkg/webhook/certs.go b/pkg/webhook/certs.go deleted file mode 100644 index 75e066833..000000000 --- a/pkg/webhook/certs.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "net" - - "k8s.io/client-go/util/cert" - - "github.com/kubeflow/spark-operator/pkg/util" -) - -const ( - Organization = "spark-operator" -) - -// certProvider is a container of a X509 certificate file and a corresponding key file for the -// webhook server, and a CA certificate file for the API server to verify the server certificate. -type certProvider struct { - caKey *rsa.PrivateKey - caCert *x509.Certificate - serverKey *rsa.PrivateKey - serverCert *x509.Certificate -} - -// NewCertProvider creates a new CertProvider instance. -func NewCertProvider(name, namespace string) (*certProvider, error) { - commonName := fmt.Sprintf("%s.%s.svc", name, namespace) - - // Generate CA private caKey - caKey, err := util.NewPrivateKey() - if err != nil { - return nil, fmt.Errorf("failed to generate CA private key: %v", err) - } - - // Generate self-signed CA certificate - caCfg := cert.Config{ - CommonName: commonName, - Organization: []string{Organization}, - } - caCert, err := cert.NewSelfSignedCACert(caCfg, caKey) - if err != nil { - return nil, fmt.Errorf("failed to generate self-signed CA certificate: %v", err) - } - - // Generate server private key - serverKey, err := util.NewPrivateKey() - if err != nil { - return nil, fmt.Errorf("failed to generate server private key: %v", err) - } - - // Generate signed server certificate - var ips []net.IP - dnsNames := []string{"localhost"} - hostIP := net.ParseIP(commonName) - if hostIP.To4() != nil { - ips = append(ips, hostIP.To4()) - } else { - dnsNames = append(dnsNames, commonName) - } - serverCfg := cert.Config{ - CommonName: commonName, - Organization: []string{Organization}, - AltNames: cert.AltNames{IPs: ips, DNSNames: dnsNames}, - } - serverCert, err := util.NewSignedServerCert(serverCfg, caKey, caCert, serverKey) - if err != nil { - return nil, fmt.Errorf("failed to generate signed server certificate: %v", err) - } - - certProvider := certProvider{ - caKey: caKey, - caCert: caCert, - serverKey: serverKey, - serverCert: serverCert, - } - - return &certProvider, nil -} - -// CAKey returns the PEM-encoded CA private key. -func (cp *certProvider) CAKey() ([]byte, error) { - if cp.caKey == nil { - return nil, errors.New("CA key is not set") - } - data := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(cp.caKey), - }) - return data, nil -} - -// CACert returns the PEM-encoded CA certificate. -func (cp *certProvider) CACert() ([]byte, error) { - if cp.caCert == nil { - return nil, errors.New("CA certificate is not set") - } - data := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cp.serverCert.Raw, - }) - return data, nil -} - -// ServerKey returns the PEM-encoded server private key. -func (cp *certProvider) ServerKey() ([]byte, error) { - if cp.serverKey == nil { - return nil, errors.New("server key is not set") - } - data := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(cp.serverKey), - }) - return data, nil -} - -// ServerCert returns the PEM-encoded server cert. 
-func (cp *certProvider) ServerCert() ([]byte, error) { - if cp.serverCert == nil { - return nil, errors.New("server cert is not set") - } - data := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cp.serverCert.Raw, - }) - return data, nil -} - -// TLSConfig returns the TLS configuration. -func (cp *certProvider) TLSConfig() (*tls.Config, error) { - keyPEMBlock, err := cp.ServerKey() - if err != nil { - return nil, fmt.Errorf("failed to get server key: %v", err) - } - - certPEMBlock, err := cp.ServerCert() - if err != nil { - return nil, fmt.Errorf("failed to get server certificate: %v", err) - } - - tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return nil, fmt.Errorf("failed to generate TLS certificate: %v", err) - } - - cfg := &tls.Config{ - Certificates: []tls.Certificate{tlsCert}, - } - return cfg, nil -} diff --git a/pkg/webhook/certs_test.go b/pkg/webhook/certs_test.go deleted file mode 100644 index d8f10ec19..000000000 --- a/pkg/webhook/certs_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package webhook - -import "testing" - -// TestNewCertProvider tests the NewCertProvider function. -func TestNewCertProvider(t *testing.T) { - name := "test-name" - namespace := "test-namespace" - - cp, err := NewCertProvider(name, namespace) - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - // Check if the returned CertProvider has non-nil fields. - if cp.caKey == nil { - t.Error("CA key is nil") - } - if cp.caCert == nil { - t.Error("CA certificate is nil") - } - if cp.serverKey == nil { - t.Error("server key is nil") - } - if cp.serverCert == nil { - t.Error("server certificate is nil") - } -} - -// TestCAKey tests the CAKey method of certProvider. -func TestCAKey(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - key, err := cp.CAKey() - if err != nil { - t.Errorf("failed to get CA key: %v", err) - } - - // Check if the returned key is not nil. - if key == nil { - t.Error("CA key is nil") - } -} - -// TestCACert tests the CACert method of certProvider. -func TestCACert(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - cert, err := cp.CACert() - if err != nil { - t.Errorf("failed to get CA certificate: %v", err) - } - - // Check if the returned certificate is not nil. - if cert == nil { - t.Error("CA certificate is nil") - } -} - -// TestServerKey tests the ServerKey method of certProvider. -func TestServerKey(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - key, err := cp.ServerKey() - if err != nil { - t.Errorf("failed to get server key: %v", err) - } - - // Check if the returned key is not nil. - if key == nil { - t.Error("server key is nil") - } -} - -// TestServerCert tests the ServerCert method of certProvider. -func TestServerCert(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - cert, err := cp.ServerCert() - if err != nil { - t.Errorf("failed to get server certificate: %v", err) - } - - // Check if the returned certificate is not nil. - if cert == nil { - t.Error("server certificate is nil") - } -} - -// TestTLSConfig tests the TLSConfig method of certProvider. 
-func TestTLSConfig(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - cfg, err := cp.TLSConfig() - if err != nil { - t.Errorf("failed to get TLS configuration: %v", err) - } - - // Check if the returned configuration is not nil. - if cfg == nil { - t.Error("TLS configuration is nil") - } -} diff --git a/pkg/webhook/patch.go b/pkg/webhook/patch.go deleted file mode 100644 index a7c20a816..000000000 --- a/pkg/webhook/patch.go +++ /dev/null @@ -1,856 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "fmt" - "strings" - - "github.com/golang/glog" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -const ( - maxNameLength = 63 -) - -// patchOperation represents a RFC6902 JSON patch operation. -type patchOperation struct { - Op string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value,omitempty"` -} - -func patchSparkPod(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var patchOps []patchOperation - - if util.IsDriverPod(pod) { - patchOps = append(patchOps, addOwnerReference(pod, app)) - } - - patchOps = append(patchOps, addVolumes(pod, app)...) - patchOps = append(patchOps, addGeneralConfigMaps(pod, app)...) - patchOps = append(patchOps, addSparkConfigMap(pod, app)...) - patchOps = append(patchOps, addHadoopConfigMap(pod, app)...) - patchOps = append(patchOps, getPrometheusConfigPatches(pod, app)...) - patchOps = append(patchOps, addTolerations(pod, app)...) - patchOps = append(patchOps, addSidecarContainers(pod, app)...) - patchOps = append(patchOps, addInitContainers(pod, app)...) - patchOps = append(patchOps, addHostNetwork(pod, app)...) - patchOps = append(patchOps, addNodeSelectors(pod, app)...) - patchOps = append(patchOps, addDNSConfig(pod, app)...) - patchOps = append(patchOps, addEnvVars(pod, app)...) - patchOps = append(patchOps, addEnvFrom(pod, app)...) - patchOps = append(patchOps, addHostAliases(pod, app)...) - patchOps = append(patchOps, addContainerPorts(pod, app)...) - patchOps = append(patchOps, addPriorityClassName(pod, app)...) 
- - op := addSchedulerName(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - if pod.Spec.Affinity == nil { - op := addAffinity(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - } - - op = addPodSecurityContext(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addSecurityContext(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addGPU(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addTerminationGracePeriodSeconds(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addPodLifeCycleConfig(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addShareProcessNamespace(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - return patchOps -} - -func addOwnerReference(pod *corev1.Pod, app *v1beta2.SparkApplication) patchOperation { - ownerReference := util.GetOwnerReference(app) - - path := "/metadata/ownerReferences" - var value interface{} - if len(pod.OwnerReferences) == 0 { - value = []metav1.OwnerReference{ownerReference} - } else { - path += "/-" - value = ownerReference - } - - return patchOperation{Op: "add", Path: path, Value: value} -} - -func addVolumes(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - volumes := app.Spec.Volumes - - volumeMap := make(map[string]corev1.Volume) - for _, v := range volumes { - volumeMap[v.Name] = v - } - - var volumeMounts []corev1.VolumeMount - if util.IsDriverPod(pod) { - volumeMounts = app.Spec.Driver.VolumeMounts - } else if util.IsExecutorPod(pod) { - volumeMounts = app.Spec.Executor.VolumeMounts - } - - var ops []patchOperation - addedVolumeMap := make(map[string]corev1.Volume) - for _, m := range volumeMounts { - // Skip adding localDirVolumes - if strings.HasPrefix(m.Name, config.SparkLocalDirVolumePrefix) { - continue - } - - if v, ok := volumeMap[m.Name]; ok { - if _, ok := addedVolumeMap[m.Name]; !ok { - ops = append(ops, addVolume(pod, v)) - addedVolumeMap[m.Name] = v - } - vmPatchOp := addVolumeMount(pod, m) - if vmPatchOp == nil { - return nil - } - ops = append(ops, *vmPatchOp) - } - } - - return ops -} - -func addVolume(pod *corev1.Pod, volume corev1.Volume) patchOperation { - path := "/spec/volumes" - var value interface{} - if len(pod.Spec.Volumes) == 0 { - value = []corev1.Volume{volume} - } else { - path += "/-" - value = volume - } - pod.Spec.Volumes = append(pod.Spec.Volumes, volume) - - return patchOperation{Op: "add", Path: path, Value: value} -} - -func addVolumeMount(pod *corev1.Pod, mount corev1.VolumeMount) *patchOperation { - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add VolumeMount %s as Spark container was not found in pod %s", mount.Name, pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/volumeMounts", i) - var value interface{} - if len(pod.Spec.Containers[i].VolumeMounts) == 0 { - value = []corev1.VolumeMount{mount} - } else { - path += "/-" - value = mount - } - pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, mount) - - return &patchOperation{Op: "add", Path: path, Value: value} -} - -func addEnvVars(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var envVars []corev1.EnvVar - if util.IsDriverPod(pod) { - envVars = app.Spec.Driver.Env - } else if util.IsExecutorPod(pod) { - envVars = app.Spec.Executor.Env - } - - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add EnvVars as Spark container was not found in pod 
%s", pod.Name) - return nil - } - basePath := fmt.Sprintf("/spec/containers/%d/env", i) - - var value interface{} - var patchOps []patchOperation - - first := false - if len(pod.Spec.Containers[i].Env) == 0 { - first = true - } - - for _, envVar := range envVars { - path := basePath - if first { - value = []corev1.EnvVar{envVar} - first = false - } else { - path += "/-" - value = envVar - } - patchOps = append(patchOps, patchOperation{Op: "add", Path: path, Value: value}) - } - return patchOps -} - -func addEnvFrom(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var envFrom []corev1.EnvFromSource - if util.IsDriverPod(pod) { - envFrom = app.Spec.Driver.EnvFrom - } else if util.IsExecutorPod(pod) { - envFrom = app.Spec.Executor.EnvFrom - } - - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add EnvFrom as Spark container was not found in pod %s", pod.Name) - return nil - } - basePath := fmt.Sprintf("/spec/containers/%d/envFrom", i) - - var value interface{} - var patchOps []patchOperation - - first := false - if len(pod.Spec.Containers[i].EnvFrom) == 0 { - first = true - } - - for _, ef := range envFrom { - path := basePath - if first { - value = []corev1.EnvFromSource{ef} - first = false - } else { - path += "/-" - value = ef - } - patchOps = append(patchOps, patchOperation{Op: "add", Path: path, Value: value}) - } - return patchOps -} - -func addEnvironmentVariable(pod *corev1.Pod, envName, envValue string) *patchOperation { - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add environment variable %s as Spark container was not found in pod %s", envName, pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/env", i) - var value interface{} - if len(pod.Spec.Containers[i].Env) == 0 { - value = []corev1.EnvVar{{Name: envName, Value: envValue}} - } else { - path += "/-" - value = corev1.EnvVar{Name: envName, Value: envValue} - } - - return &patchOperation{Op: "add", Path: path, Value: value} -} - -func addSparkConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var patchOps []patchOperation - sparkConfigMapName := app.Spec.SparkConfigMap - if sparkConfigMapName != nil { - patchOps = append(patchOps, addConfigMapVolume(pod, *sparkConfigMapName, config.SparkConfigMapVolumeName)) - vmPatchOp := addConfigMapVolumeMount(pod, config.SparkConfigMapVolumeName, config.DefaultSparkConfDir) - if vmPatchOp == nil { - return nil - } - patchOps = append(patchOps, *vmPatchOp) - envPatchOp := addEnvironmentVariable(pod, config.SparkConfDirEnvVar, config.DefaultSparkConfDir) - if envPatchOp == nil { - return nil - } - patchOps = append(patchOps, *envPatchOp) - } - return patchOps -} - -func addHadoopConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var patchOps []patchOperation - hadoopConfigMapName := app.Spec.HadoopConfigMap - if hadoopConfigMapName != nil { - patchOps = append(patchOps, addConfigMapVolume(pod, *hadoopConfigMapName, config.HadoopConfigMapVolumeName)) - vmPatchOp := addConfigMapVolumeMount(pod, config.HadoopConfigMapVolumeName, config.DefaultHadoopConfDir) - if vmPatchOp == nil { - return nil - } - patchOps = append(patchOps, *vmPatchOp) - envPatchOp := addEnvironmentVariable(pod, config.HadoopConfDirEnvVar, config.DefaultHadoopConfDir) - if envPatchOp == nil { - return nil - } - patchOps = append(patchOps, *envPatchOp) - } - return patchOps -} - -func addGeneralConfigMaps(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var configMaps 
[]v1beta2.NamePath - if util.IsDriverPod(pod) { - configMaps = app.Spec.Driver.ConfigMaps - } else if util.IsExecutorPod(pod) { - configMaps = app.Spec.Executor.ConfigMaps - } - - var patchOps []patchOperation - for _, namePath := range configMaps { - volumeName := namePath.Name + "-vol" - if len(volumeName) > maxNameLength { - volumeName = volumeName[0:maxNameLength] - glog.V(2).Infof("ConfigMap volume name is too long. Truncating to length %d. Result: %s.", maxNameLength, volumeName) - } - patchOps = append(patchOps, addConfigMapVolume(pod, namePath.Name, volumeName)) - vmPatchOp := addConfigMapVolumeMount(pod, volumeName, namePath.Path) - if vmPatchOp == nil { - return nil - } - patchOps = append(patchOps, *vmPatchOp) - } - return patchOps -} - -func getPrometheusConfigPatches(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - // Skip if Prometheus Monitoring is not enabled or an in-container ConfigFile is used, - // in which cases a Prometheus ConfigMap won't be created. - if !app.PrometheusMonitoringEnabled() || (app.HasMetricsPropertiesFile() && app.HasPrometheusConfigFile()) { - return nil - } - - if util.IsDriverPod(pod) && !app.ExposeDriverMetrics() { - return nil - } - if util.IsExecutorPod(pod) && !app.ExposeExecutorMetrics() { - return nil - } - - var patchOps []patchOperation - name := config.GetPrometheusConfigMapName(app) - volumeName := name + "-vol" - mountPath := config.PrometheusConfigMapMountPath - promPort := config.DefaultPrometheusJavaAgentPort - if app.Spec.Monitoring.Prometheus.Port != nil { - promPort = *app.Spec.Monitoring.Prometheus.Port - } - promProtocol := config.DefaultPrometheusPortProtocol - promPortName := config.DefaultPrometheusPortName - if app.Spec.Monitoring.Prometheus.PortName != nil { - promPortName = *app.Spec.Monitoring.Prometheus.PortName - } - - patchOps = append(patchOps, addConfigMapVolume(pod, name, volumeName)) - vmPatchOp := addConfigMapVolumeMount(pod, volumeName, mountPath) - if vmPatchOp == nil { - glog.Warningf("could not mount volume %s in path %s", volumeName, mountPath) - return nil - } - patchOps = append(patchOps, *vmPatchOp) - promPortPatchOp := addContainerPort(pod, promPort, promProtocol, promPortName) - if promPortPatchOp == nil { - glog.Warningf("could not expose port %d to scrape metrics outside the pod", promPort) - return nil - } - patchOps = append(patchOps, *promPortPatchOp) - return patchOps -} - -func addContainerPorts(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var ports []v1beta2.Port - - if util.IsDriverPod(pod) { - ports = app.Spec.Driver.Ports - } else if util.IsExecutorPod(pod) { - ports = app.Spec.Executor.Ports - } - - var patchOps []patchOperation - for _, p := range ports { - portPatchOp := addContainerPort(pod, p.ContainerPort, p.Protocol, p.Name) - if portPatchOp == nil { - glog.Warningf("could not expose port named %s", p.Name) - continue - } - patchOps = append(patchOps, *portPatchOp) - } - return patchOps -} - -func addContainerPort(pod *corev1.Pod, port int32, protocol string, portName string) *patchOperation { - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add containerPort %d as Spark container was not found in pod %s", port, pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/ports", i) - containerPort := corev1.ContainerPort{ - Name: portName, - ContainerPort: port, - Protocol: corev1.Protocol(protocol), - } - var value interface{} - if len(pod.Spec.Containers[i].Ports) == 0 { - value = 
[]corev1.ContainerPort{containerPort} - } else { - path += "/-" - value = containerPort - } - pod.Spec.Containers[i].Ports = append(pod.Spec.Containers[i].Ports, containerPort) - return &patchOperation{Op: "add", Path: path, Value: value} -} - -func addConfigMapVolume(pod *corev1.Pod, configMapName string, configMapVolumeName string) patchOperation { - volume := corev1.Volume{ - Name: configMapVolumeName, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: configMapName, - }, - }, - }, - } - return addVolume(pod, volume) -} - -func addConfigMapVolumeMount(pod *corev1.Pod, configMapVolumeName string, mountPath string) *patchOperation { - mount := corev1.VolumeMount{ - Name: configMapVolumeName, - ReadOnly: true, - MountPath: mountPath, - } - return addVolumeMount(pod, mount) -} - -func addAffinity(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var affinity *corev1.Affinity - if util.IsDriverPod(pod) { - affinity = app.Spec.Driver.Affinity - } else if util.IsExecutorPod(pod) { - affinity = app.Spec.Executor.Affinity - } - - if affinity == nil { - return nil - } - return &patchOperation{Op: "add", Path: "/spec/affinity", Value: *affinity} -} - -func addTolerations(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var tolerations []corev1.Toleration - if util.IsDriverPod(pod) { - tolerations = app.Spec.Driver.SparkPodSpec.Tolerations - } else if util.IsExecutorPod(pod) { - tolerations = app.Spec.Executor.SparkPodSpec.Tolerations - } - - first := false - if len(pod.Spec.Tolerations) == 0 { - first = true - } - - var ops []patchOperation - for _, v := range tolerations { - ops = append(ops, addToleration(pod, v, first)) - if first { - first = false - } - } - return ops -} - -func addNodeSelectors(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var nodeSelector map[string]string - if util.IsDriverPod(pod) { - nodeSelector = app.Spec.Driver.NodeSelector - } else if util.IsExecutorPod(pod) { - nodeSelector = app.Spec.Executor.NodeSelector - } - - var ops []patchOperation - if len(nodeSelector) > 0 { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/nodeSelector", Value: nodeSelector}) - } - return ops -} - -func addDNSConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var dnsConfig *corev1.PodDNSConfig - - if util.IsDriverPod(pod) { - dnsConfig = app.Spec.Driver.DNSConfig - } else if util.IsExecutorPod(pod) { - dnsConfig = app.Spec.Executor.DNSConfig - } - - var ops []patchOperation - if dnsConfig != nil { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/dnsConfig", Value: dnsConfig}) - } - return ops -} - -func addSchedulerName(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var schedulerName *string - - //NOTE: Preferred to use `BatchScheduler` if application spec has it configured. 
- if app.Spec.BatchScheduler != nil { - schedulerName = app.Spec.BatchScheduler - } else if util.IsDriverPod(pod) { - schedulerName = app.Spec.Driver.SchedulerName - } else if util.IsExecutorPod(pod) { - schedulerName = app.Spec.Executor.SchedulerName - } - if schedulerName == nil || *schedulerName == "" { - return nil - } - return &patchOperation{Op: "add", Path: "/spec/schedulerName", Value: *schedulerName} -} - -func addPriorityClassName(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var priorityClassName *string - - if app.Spec.BatchSchedulerOptions != nil { - priorityClassName = app.Spec.BatchSchedulerOptions.PriorityClassName - } - - var ops []patchOperation - if priorityClassName != nil && *priorityClassName != "" { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/priorityClassName", Value: *priorityClassName}) - - if pod.Spec.Priority != nil { - ops = append(ops, patchOperation{Op: "remove", Path: "/spec/priority"}) - } - if pod.Spec.PreemptionPolicy != nil { - ops = append(ops, patchOperation{Op: "remove", Path: "/spec/preemptionPolicy"}) - } - } - - return ops -} - -func addToleration(pod *corev1.Pod, toleration corev1.Toleration, first bool) patchOperation { - path := "/spec/tolerations" - var value interface{} - if first { - value = []corev1.Toleration{toleration} - } else { - path += "/-" - value = toleration - } - - return patchOperation{Op: "add", Path: path, Value: value} -} - -func addPodSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var secContext *corev1.PodSecurityContext - if util.IsDriverPod(pod) { - secContext = app.Spec.Driver.PodSecurityContext - } else if util.IsExecutorPod(pod) { - secContext = app.Spec.Executor.PodSecurityContext - } - - if secContext == nil { - return nil - } - return &patchOperation{Op: "add", Path: "/spec/securityContext", Value: *secContext} -} - -func addSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var secContext *corev1.SecurityContext - if util.IsDriverPod(pod) { - secContext = app.Spec.Driver.SecurityContext - } else if util.IsExecutorPod(pod) { - secContext = app.Spec.Executor.SecurityContext - } - - if secContext == nil { - return nil - } - - i := findContainer(pod) - - if i < 0 { - glog.Warningf("Spark driver/executor container not found in pod %s", pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/securityContext", i) - return &patchOperation{Op: "add", Path: path, Value: *secContext} -} - -func addSidecarContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var sidecars []corev1.Container - if util.IsDriverPod(pod) { - sidecars = app.Spec.Driver.Sidecars - } else if util.IsExecutorPod(pod) { - sidecars = app.Spec.Executor.Sidecars - } - - var ops []patchOperation - for _, c := range sidecars { - sd := c - if !hasContainer(pod, &sd) { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/containers/-", Value: &sd}) - } - } - return ops -} - -func addInitContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var initContainers []corev1.Container - if util.IsDriverPod(pod) { - initContainers = app.Spec.Driver.InitContainers - } else if util.IsExecutorPod(pod) { - initContainers = app.Spec.Executor.InitContainers - } - - first := false - if len(pod.Spec.InitContainers) == 0 { - first = true - } - - var ops []patchOperation - for _, c := range initContainers { - sd := c - if first { - first = false - value := []corev1.Container{sd} - ops = append(ops, 
patchOperation{Op: "add", Path: "/spec/initContainers", Value: value}) - } else if !hasInitContainer(pod, &sd) { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/initContainers/-", Value: &sd}) - } - - } - return ops -} - -func addGPU(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var gpu *v1beta2.GPUSpec - if util.IsDriverPod(pod) { - gpu = app.Spec.Driver.GPU - } - if util.IsExecutorPod(pod) { - gpu = app.Spec.Executor.GPU - } - if gpu == nil { - return nil - } - if gpu.Name == "" { - glog.V(2).Infof("Please specify GPU resource name, such as: nvidia.com/gpu, amd.com/gpu etc. Current gpu spec: %+v", gpu) - return nil - } - if gpu.Quantity <= 0 { - glog.V(2).Infof("GPU Quantity must be positive. Current gpu spec: %+v", gpu) - return nil - } - - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add GPU as Spark container was not found in pod %s", pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/resources/limits", i) - var value interface{} - if len(pod.Spec.Containers[i].Resources.Limits) == 0 { - value = corev1.ResourceList{ - corev1.ResourceName(gpu.Name): *resource.NewQuantity(gpu.Quantity, resource.DecimalSI), - } - } else { - encoder := strings.NewReplacer("~", "~0", "/", "~1") - path += "/" + encoder.Replace(gpu.Name) - value = *resource.NewQuantity(gpu.Quantity, resource.DecimalSI) - } - return &patchOperation{Op: "add", Path: path, Value: value} -} - -func addHostNetwork(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var hostNetwork *bool - if util.IsDriverPod(pod) { - hostNetwork = app.Spec.Driver.HostNetwork - } - if util.IsExecutorPod(pod) { - hostNetwork = app.Spec.Executor.HostNetwork - } - - if hostNetwork == nil || *hostNetwork == false { - return nil - } - var ops []patchOperation - ops = append(ops, patchOperation{Op: "add", Path: "/spec/hostNetwork", Value: true}) - // For Pods with hostNetwork, explicitly set its DNS policy to “ClusterFirstWithHostNet” - // Detail: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy - ops = append(ops, patchOperation{Op: "add", Path: "/spec/dnsPolicy", Value: corev1.DNSClusterFirstWithHostNet}) - return ops -} - -func hasContainer(pod *corev1.Pod, container *corev1.Container) bool { - for _, c := range pod.Spec.Containers { - if container.Name == c.Name && container.Image == c.Image { - return true - } - } - return false -} - -func hasInitContainer(pod *corev1.Pod, container *corev1.Container) bool { - for _, c := range pod.Spec.InitContainers { - if container.Name == c.Name && container.Image == c.Image { - return true - } - } - return false -} - -func addTerminationGracePeriodSeconds(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - path := "/spec/terminationGracePeriodSeconds" - var gracePeriodSeconds *int64 - - if util.IsDriverPod(pod) { - gracePeriodSeconds = app.Spec.Driver.TerminationGracePeriodSeconds - } else if util.IsExecutorPod(pod) { - gracePeriodSeconds = app.Spec.Executor.TerminationGracePeriodSeconds - } - if gracePeriodSeconds == nil { - return nil - } - return &patchOperation{Op: "add", Path: path, Value: *gracePeriodSeconds} -} - -func addPodLifeCycleConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var lifeCycle *corev1.Lifecycle - var containerName string - if util.IsDriverPod(pod) { - lifeCycle = app.Spec.Driver.Lifecycle - containerName = config.SparkDriverContainerName - } else if util.IsExecutorPod(pod) { - lifeCycle = 
app.Spec.Executor.Lifecycle - containerName = config.SparkExecutorContainerName - } - if lifeCycle == nil { - return nil - } - - i := 0 - // Find the driver container in the pod. - for ; i < len(pod.Spec.Containers); i++ { - if pod.Spec.Containers[i].Name == containerName { - break - } - } - if i == len(pod.Spec.Containers) { - glog.Warningf("Spark container %s not found in pod %s", containerName, pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/lifecycle", i) - return &patchOperation{Op: "add", Path: path, Value: *lifeCycle} -} - -func findContainer(pod *corev1.Pod) int { - var candidateContainerNames []string - if util.IsDriverPod(pod) { - candidateContainerNames = append(candidateContainerNames, config.SparkDriverContainerName) - } else if util.IsExecutorPod(pod) { - // Spark 3.x changed the default executor container name so we need to include both. - candidateContainerNames = append(candidateContainerNames, config.SparkExecutorContainerName, config.Spark3DefaultExecutorContainerName) - } - - if len(candidateContainerNames) == 0 { - return -1 - } - - for i := 0; i < len(pod.Spec.Containers); i++ { - for _, name := range candidateContainerNames { - if pod.Spec.Containers[i].Name == name { - return i - } - } - } - return -1 -} - -func addHostAliases(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var hostAliases []corev1.HostAlias - if util.IsDriverPod(pod) { - hostAliases = app.Spec.Driver.HostAliases - } else if util.IsExecutorPod(pod) { - hostAliases = app.Spec.Executor.HostAliases - } - - first := false - if len(pod.Spec.HostAliases) == 0 { - first = true - } - - var ops []patchOperation - if len(hostAliases) > 0 { - if first { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/hostAliases", Value: hostAliases}) - } else { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/hostAliases/-", Value: hostAliases}) - } - } - return ops -} - -func addShareProcessNamespace(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var shareProcessNamespace *bool - if util.IsDriverPod(pod) { - shareProcessNamespace = app.Spec.Driver.ShareProcessNamespace - } - if util.IsExecutorPod(pod) { - shareProcessNamespace = app.Spec.Executor.ShareProcessNamespace - } - - if shareProcessNamespace == nil || *shareProcessNamespace == false { - return nil - } - return &patchOperation{Op: "add", Path: "/spec/shareProcessNamespace", Value: *shareProcessNamespace} -} diff --git a/pkg/webhook/resourceusage/enforcer.go b/pkg/webhook/resourceusage/enforcer.go deleted file mode 100644 index 87e9bbce0..000000000 --- a/pkg/webhook/resourceusage/enforcer.go +++ /dev/null @@ -1,95 +0,0 @@ -package resourceusage - -import ( - "fmt" - "github.com/golang/glog" - so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/informers" - corev1informers "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/tools/cache" -) - -type ResourceQuotaEnforcer struct { - watcher ResourceUsageWatcher - resourceQuotaInformer corev1informers.ResourceQuotaInformer -} - -func NewResourceQuotaEnforcer(crdInformerFactory crdinformers.SharedInformerFactory, coreV1InformerFactory informers.SharedInformerFactory) ResourceQuotaEnforcer { - resourceUsageWatcher := newResourceUsageWatcher(crdInformerFactory, coreV1InformerFactory) - informer := 
coreV1InformerFactory.Core().V1().ResourceQuotas() - informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) - return ResourceQuotaEnforcer{ - watcher: resourceUsageWatcher, - resourceQuotaInformer: informer, - } -} - -func (r ResourceQuotaEnforcer) WaitForCacheSync(stopCh <-chan struct{}) error { - if !cache.WaitForCacheSync(stopCh, func() bool { - return r.resourceQuotaInformer.Informer().HasSynced() - }) { - return fmt.Errorf("cache sync canceled") - } - return nil -} - -func (r *ResourceQuotaEnforcer) admitResource(kind, namespace, name string, requestedResources ResourceList) (string, error) { - glog.V(2).Infof("Processing admission request for %s %s/%s, requesting: %s", kind, namespace, name, requestedResources) - resourceQuotas, err := r.resourceQuotaInformer.Lister().ResourceQuotas(namespace).List(labels.Everything()) - if err != nil { - return "", err - } - if (requestedResources.cpu.IsZero() && requestedResources.memory.IsZero()) || len(resourceQuotas) == 0 { - return "", nil - } - - currentNamespaceUsage, currentApplicationUsage := r.watcher.GetCurrentResourceUsageWithApplication(namespace, kind, name) - - for _, quota := range resourceQuotas { - // Scope selectors not currently supported, ignore any ResourceQuota that does not match everything. - if quota.Spec.ScopeSelector != nil || len(quota.Spec.Scopes) > 0 { - continue - } - - // If an existing application has increased its usage, check it against the quota again. If its usage hasn't increased, always allow it. - if requestedResources.cpu.Cmp(currentApplicationUsage.cpu) == 1 { - if cpuLimit, present := quota.Spec.Hard[corev1.ResourceCPU]; present { - availableCpu := cpuLimit - availableCpu.Sub(currentNamespaceUsage.cpu) - if requestedResources.cpu.Cmp(availableCpu) == 1 { - return fmt.Sprintf("%s %s/%s requests too many cores (%.3f cores requested, %.3f available).", kind, namespace, name, float64(requestedResources.cpu.MilliValue())/1000.0, float64(availableCpu.MilliValue())/1000.0), nil - } - } - } - - if requestedResources.memory.Cmp(currentApplicationUsage.memory) == 1 { - if memoryLimit, present := quota.Spec.Hard[corev1.ResourceMemory]; present { - availableMemory := memoryLimit - availableMemory.Sub(currentNamespaceUsage.memory) - if requestedResources.memory.Cmp(availableMemory) == 1 { - return fmt.Sprintf("%s %s/%s requests too much memory (%dMi requested, %dMi available).", kind, namespace, name, requestedResources.memory.Value()/(1<<20), availableMemory.Value()/(1<<20)), nil - } - } - } - } - return "", nil -} - -func (r *ResourceQuotaEnforcer) AdmitSparkApplication(app so.SparkApplication) (string, error) { - resourceUsage, err := sparkApplicationResourceUsage(app) - if err != nil { - return "", err - } - return r.admitResource(KindSparkApplication, app.ObjectMeta.Namespace, app.ObjectMeta.Name, resourceUsage) -} - -func (r *ResourceQuotaEnforcer) AdmitScheduledSparkApplication(app so.ScheduledSparkApplication) (string, error) { - resourceUsage, err := scheduledSparkApplicationResourceUsage(app) - if err != nil { - return "", err - } - return r.admitResource(KindScheduledSparkApplication, app.ObjectMeta.Namespace, app.ObjectMeta.Name, resourceUsage) -} diff --git a/pkg/webhook/resourceusage/handlers.go b/pkg/webhook/resourceusage/handlers.go deleted file mode 100644 index c4d86fd76..000000000 --- a/pkg/webhook/resourceusage/handlers.go +++ /dev/null @@ -1,119 +0,0 @@ -package resourceusage - -import ( - so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - - 
"github.com/golang/glog" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/cache" -) - -func (r *ResourceUsageWatcher) onPodAdded(obj interface{}) { - pod := obj.(*corev1.Pod) - // A pod launched by the Spark operator will already be accounted for by the CRD informer callback - if !launchedBySparkOperator(pod.ObjectMeta) { - r.setResources("Pod", namespaceOrDefault(pod.ObjectMeta), pod.ObjectMeta.Name, podResourceUsage(pod), r.usageByNamespacePod) - } -} - -func (r *ResourceUsageWatcher) onPodUpdated(oldObj, newObj interface{}) { - newPod := newObj.(*corev1.Pod) - if !launchedBySparkOperator(newPod.ObjectMeta) { - if newPod.Status.Phase == corev1.PodFailed || newPod.Status.Phase == corev1.PodSucceeded { - r.deleteResources("Pod", namespaceOrDefault(newPod.ObjectMeta), newPod.ObjectMeta.Name, r.usageByNamespacePod) - } else { - r.setResources("Pod", namespaceOrDefault(newPod.ObjectMeta), newPod.ObjectMeta.Name, podResourceUsage(newPod), r.usageByNamespacePod) - } - } -} - -func (r *ResourceUsageWatcher) onPodDeleted(obj interface{}) { - var pod *corev1.Pod - switch o := obj.(type) { - case *corev1.Pod: - pod = o - case cache.DeletedFinalStateUnknown: - pod = o.Obj.(*corev1.Pod) - default: - return - } - if !launchedBySparkOperator(pod.ObjectMeta) { - r.deleteResources("Pod", namespaceOrDefault(pod.ObjectMeta), pod.ObjectMeta.Name, r.usageByNamespacePod) - } -} - -func (r *ResourceUsageWatcher) onSparkApplicationAdded(obj interface{}) { - app := obj.(*so.SparkApplication) - namespace := namespaceOrDefault(app.ObjectMeta) - resources, err := sparkApplicationResourceUsage(*app) - if err != nil { - glog.Errorf("failed to determine resource usage of SparkApplication %s/%s: %v", namespace, app.ObjectMeta.Name, err) - } else { - r.setResources(KindSparkApplication, namespace, app.ObjectMeta.Name, resources, r.usageByNamespaceApplication) - } -} - -func (r *ResourceUsageWatcher) onSparkApplicationUpdated(oldObj, newObj interface{}) { - oldApp := oldObj.(*so.SparkApplication) - newApp := newObj.(*so.SparkApplication) - if oldApp.ResourceVersion == newApp.ResourceVersion { - return - } - namespace := namespaceOrDefault(newApp.ObjectMeta) - newResources, err := sparkApplicationResourceUsage(*newApp) - if err != nil { - glog.Errorf("failed to determine resource usage of SparkApplication %s/%s: %v", namespace, newApp.ObjectMeta.Name, err) - } else { - r.setResources(KindSparkApplication, namespace, newApp.ObjectMeta.Name, newResources, r.usageByNamespaceApplication) - } -} - -func (r *ResourceUsageWatcher) onSparkApplicationDeleted(obj interface{}) { - var app *so.SparkApplication - switch o := obj.(type) { - case *so.SparkApplication: - app = o - case cache.DeletedFinalStateUnknown: - app = o.Obj.(*so.SparkApplication) - default: - return - } - namespace := namespaceOrDefault(app.ObjectMeta) - r.deleteResources(KindSparkApplication, namespace, app.ObjectMeta.Name, r.usageByNamespaceApplication) -} - -func (r *ResourceUsageWatcher) onScheduledSparkApplicationAdded(obj interface{}) { - app := obj.(*so.ScheduledSparkApplication) - namespace := namespaceOrDefault(app.ObjectMeta) - resources, err := scheduledSparkApplicationResourceUsage(*app) - if err != nil { - glog.Errorf("failed to determine resource usage of ScheduledSparkApplication %s/%s: %v", namespace, app.ObjectMeta.Name, err) - } else { - r.setResources(KindScheduledSparkApplication, namespace, app.ObjectMeta.Name, resources, r.usageByNamespaceScheduledApplication) - } -} - -func (r *ResourceUsageWatcher) 
onScheduledSparkApplicationUpdated(oldObj, newObj interface{}) { - newApp := oldObj.(*so.ScheduledSparkApplication) - namespace := namespaceOrDefault(newApp.ObjectMeta) - newResources, err := scheduledSparkApplicationResourceUsage(*newApp) - if err != nil { - glog.Errorf("failed to determine resource usage of ScheduledSparkApplication %s/%s: %v", namespace, newApp.ObjectMeta.Name, err) - } else { - r.setResources(KindSparkApplication, namespace, newApp.ObjectMeta.Name, newResources, r.usageByNamespaceScheduledApplication) - } -} - -func (r *ResourceUsageWatcher) onScheduledSparkApplicationDeleted(obj interface{}) { - var app *so.ScheduledSparkApplication - switch o := obj.(type) { - case *so.ScheduledSparkApplication: - app = o - case cache.DeletedFinalStateUnknown: - app = o.Obj.(*so.ScheduledSparkApplication) - default: - return - } - namespace := namespaceOrDefault(app.ObjectMeta) - r.deleteResources(KindScheduledSparkApplication, namespace, app.ObjectMeta.Name, r.usageByNamespaceScheduledApplication) -} diff --git a/pkg/webhook/resourceusage/util.go b/pkg/webhook/resourceusage/util.go deleted file mode 100644 index d256f3a73..000000000 --- a/pkg/webhook/resourceusage/util.go +++ /dev/null @@ -1,241 +0,0 @@ -package resourceusage - -import ( - "fmt" - so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "math" - "regexp" - "strconv" - "strings" -) - -// ...are you serious, Go? -func max(x, y int64) int64 { - if x > y { - return x - } - return y -} - -const ( - // https://spark.apache.org/docs/latest/configuration.html - defaultCpuMillicores = 1000 - defaultMemoryBytes = 1 << 30 // 1Gi - defaultMemoryOverhead = 0.1 - - // https://github.com/apache/spark/blob/c4bbfd177b4e7cb46f47b39df9fd71d2d9a12c6d/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Constants.scala#L85 - minMemoryOverhead = 384 * (1 << 20) // 384Mi - nonJvmDefaultMemoryOverhead = 0.4 -) - -func namespaceOrDefault(meta metav1.ObjectMeta) string { - namespace := meta.Namespace - if namespace == "" { - namespace = "default" - } - return namespace -} - -func launchedBySparkOperator(meta metav1.ObjectMeta) bool { - val, present := meta.Labels[config.LaunchedBySparkOperatorLabel] - return present && val == "true" -} - -func resourcesRequiredToSchedule(resourceRequirements corev1.ResourceRequirements) (cpu int64, memoryBytes int64) { - if coresRequest, present := resourceRequirements.Requests[corev1.ResourceCPU]; present { - cpu = coresRequest.MilliValue() - } else if coresLimit, present := resourceRequirements.Limits[corev1.ResourceCPU]; present { - cpu = coresLimit.MilliValue() - } - if memoryRequest, present := resourceRequirements.Requests[corev1.ResourceMemory]; present { - memoryBytes = memoryRequest.Value() - } else if memoryLimit, present := resourceRequirements.Limits[corev1.ResourceMemory]; present { - memoryBytes = memoryLimit.Value() - } - return cpu, memoryBytes -} - -func coresRequiredForSparkPod(spec so.SparkPodSpec, instances int64) (int64, error) { - var cpu int64 - if spec.Cores != nil { - cpu = int64(*spec.Cores) * 1000 - } else { - cpu = defaultCpuMillicores - } - return cpu * instances, nil -} - -var javaStringSuffixes = map[string]int64{ - "b": 1, - "kb": 1 << 10, - "k": 1 << 10, - "mb": 1 << 20, - "m": 1 << 20, - "gb": 1 << 30, - "g": 1 << 30, - "tb": 1 << 40, - "t": 1 << 40, - "pb": 1 << 
50, - "p": 1 << 50, -} - -var javaStringPattern = regexp.MustCompile(`([0-9]+)([a-z]+)?`) -var javaFractionStringPattern = regexp.MustCompile(`([0-9]+\.[0-9]+)([a-z]+)?`) - -// Logic copied from https://github.com/apache/spark/blob/5264164a67df498b73facae207eda12ee133be7d/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java#L276 -func parseJavaMemoryString(str string) (int64, error) { - lower := strings.ToLower(str) - if matches := javaStringPattern.FindStringSubmatch(lower); matches != nil { - value, err := strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, err - } - suffix := matches[2] - if multiplier, present := javaStringSuffixes[suffix]; present { - return multiplier * value, nil - } - } else if matches = javaFractionStringPattern.FindStringSubmatch(lower); matches != nil { - value, err := strconv.ParseFloat(matches[1], 64) - if err != nil { - return 0, err - } - suffix := matches[2] - if multiplier, present := javaStringSuffixes[suffix]; present { - return int64(float64(multiplier) * value), nil - } - } - return 0, fmt.Errorf("could not parse string '%s' as a Java-style memory value. Examples: 100kb, 1.5mb, 1g", str) -} - -// Logic copied from https://github.com/apache/spark/blob/c4bbfd177b4e7cb46f47b39df9fd71d2d9a12c6d/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicDriverFeatureStep.scala -func memoryRequiredForSparkPod(spec so.SparkPodSpec, memoryOverheadFactor *string, appType so.SparkApplicationType, replicas int64) (int64, error) { - var memoryBytes int64 - if spec.Memory != nil { - memory, err := parseJavaMemoryString(*spec.Memory) - if err != nil { - return 0, err - } - memoryBytes = memory - } else { - memoryBytes = defaultMemoryBytes - } - var memoryOverheadBytes int64 - if spec.MemoryOverhead != nil { - overhead, err := parseJavaMemoryString(*spec.MemoryOverhead) - if err != nil { - return 0, err - } - memoryOverheadBytes = overhead - } else { - var overheadFactor float64 - if memoryOverheadFactor != nil { - overheadFactorScope, err := strconv.ParseFloat(*memoryOverheadFactor, 64) - if err != nil { - return 0, err - } - overheadFactor = overheadFactorScope - } else { - if appType == so.JavaApplicationType { - overheadFactor = defaultMemoryOverhead - } else { - overheadFactor = nonJvmDefaultMemoryOverhead - } - } - memoryOverheadBytes = int64(math.Max(overheadFactor*float64(memoryBytes), minMemoryOverhead)) - } - return (memoryBytes + memoryOverheadBytes) * replicas, nil -} - -func resourceUsage(spec so.SparkApplicationSpec) (ResourceList, error) { - driverMemoryOverheadFactor := spec.MemoryOverheadFactor - executorMemoryOverheadFactor := spec.MemoryOverheadFactor - driverMemory, err := memoryRequiredForSparkPod(spec.Driver.SparkPodSpec, driverMemoryOverheadFactor, spec.Type, 1) - if err != nil { - return ResourceList{}, err - } - - var instances int64 = 1 - if spec.Executor.Instances != nil { - instances = int64(*spec.Executor.Instances) - } - executorMemory, err := memoryRequiredForSparkPod(spec.Executor.SparkPodSpec, executorMemoryOverheadFactor, spec.Type, instances) - if err != nil { - return ResourceList{}, err - } - - driverCores, err := coresRequiredForSparkPod(spec.Driver.SparkPodSpec, 1) - if err != nil { - return ResourceList{}, err - } - - executorCores, err := coresRequiredForSparkPod(spec.Executor.SparkPodSpec, instances) - if err != nil { - return ResourceList{}, err - } - - return ResourceList{ - cpu: *resource.NewMilliQuantity(driverCores+executorCores, 
resource.DecimalSI), - memory: *resource.NewQuantity(driverMemory+executorMemory, resource.DecimalSI), - }, nil -} - -func sparkApplicationResourceUsage(sparkApp so.SparkApplication) (ResourceList, error) { - // A completed/failed SparkApplication consumes no resources - if !sparkApp.Status.TerminationTime.IsZero() || sparkApp.Status.AppState.State == so.FailedState || sparkApp.Status.AppState.State == so.CompletedState { - return ResourceList{}, nil - } - return resourceUsage(sparkApp.Spec) -} - -func scheduledSparkApplicationResourceUsage(sparkApp so.ScheduledSparkApplication) (ResourceList, error) { - // Failed validation, will consume no resources - if sparkApp.Status.ScheduleState == so.FailedValidationState { - return ResourceList{}, nil - } - return resourceUsage(sparkApp.Spec.Template) -} - -func podResourceUsage(pod *corev1.Pod) ResourceList { - spec := pod.Spec - var initCores int64 - var initMemoryBytes int64 - completed := make(map[string]struct{}) - - for _, containerStatus := range pod.Status.InitContainerStatuses { - if containerStatus.State.Terminated != nil { - completed[containerStatus.Name] = struct{}{} - } - } - for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.State.Terminated != nil { - completed[containerStatus.Name] = struct{}{} - } - } - - for _, container := range spec.InitContainers { - if _, present := completed[container.Name]; !present { - c, m := resourcesRequiredToSchedule(container.Resources) - initCores = max(c, initCores) - initMemoryBytes = max(m, initMemoryBytes) - } - } - var cores int64 - var memoryBytes int64 - for _, container := range spec.Containers { - if _, present := completed[container.Name]; !present { - c, m := resourcesRequiredToSchedule(container.Resources) - cores += c - memoryBytes += m - } - } - cores = max(initCores, cores) - memoryBytes = max(initMemoryBytes, memoryBytes) - return ResourceList{ - cpu: *resource.NewMilliQuantity(cores, resource.DecimalSI), - memory: *resource.NewQuantity(memoryBytes, resource.DecimalSI), - } -} diff --git a/pkg/webhook/resourceusage/util_test.go b/pkg/webhook/resourceusage/util_test.go deleted file mode 100644 index c610136e0..000000000 --- a/pkg/webhook/resourceusage/util_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package resourceusage - -import ( - "testing" -) - -func assertMemory(memoryString string, expectedBytes int64, t *testing.T) { - m, err := parseJavaMemoryString(memoryString) - if err != nil { - t.Error(err) - return - } - if m != expectedBytes { - t.Errorf("%s: expected %v bytes, got %v bytes", memoryString, expectedBytes, m) - return - } -} - -func TestJavaMemoryString(t *testing.T) { - assertMemory("1b", 1, t) - assertMemory("100k", 100*1024, t) - assertMemory("1gb", 1024*1024*1024, t) - assertMemory("10TB", 10*1024*1024*1024*1024, t) - assertMemory("10PB", 10*1024*1024*1024*1024*1024, t) -} diff --git a/pkg/webhook/resourceusage/watcher.go b/pkg/webhook/resourceusage/watcher.go deleted file mode 100644 index 49395bf11..000000000 --- a/pkg/webhook/resourceusage/watcher.go +++ /dev/null @@ -1,157 +0,0 @@ -package resourceusage - -import ( - "fmt" - "sync" - - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - - "github.com/golang/glog" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/client-go/informers" - corev1informers "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/tools/cache" -) - -type ResourceUsageWatcher struct { - currentUsageLock *sync.RWMutex - currentUsageByNamespace map[string]*ResourceList - 
usageByNamespacePod map[string]map[string]*ResourceList - usageByNamespaceScheduledApplication map[string]map[string]*ResourceList - usageByNamespaceApplication map[string]map[string]*ResourceList - crdInformerFactory crdinformers.SharedInformerFactory - coreV1InformerFactory informers.SharedInformerFactory - podInformer corev1informers.PodInformer -} - -// more convenient replacement for corev1.ResourceList -type ResourceList struct { - cpu resource.Quantity - memory resource.Quantity -} - -const ( - KindSparkApplication = "SparkApplication" - KindScheduledSparkApplication = "ScheduledSparkApplication" -) - -func (r ResourceList) String() string { - return fmt.Sprintf("cpu: %v mcpu, memory %v bytes", r.cpu.MilliValue(), r.memory.Value()) -} - -func newResourceUsageWatcher(crdInformerFactory crdinformers.SharedInformerFactory, coreV1InformerFactory informers.SharedInformerFactory) ResourceUsageWatcher { - glog.V(2).Infof("Creating new resource usage watcher") - r := ResourceUsageWatcher{ - crdInformerFactory: crdInformerFactory, - currentUsageLock: &sync.RWMutex{}, - coreV1InformerFactory: coreV1InformerFactory, - currentUsageByNamespace: make(map[string]*ResourceList), - usageByNamespacePod: make(map[string]map[string]*ResourceList), - usageByNamespaceScheduledApplication: make(map[string]map[string]*ResourceList), - usageByNamespaceApplication: make(map[string]map[string]*ResourceList), - } - // Note: Events for each handler are processed serially, so no coordination is needed between - // the different callbacks. Coordination is still needed around updating the shared state. - sparkApplicationInformer := r.crdInformerFactory.Sparkoperator().V1beta2().SparkApplications() - sparkApplicationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: r.onSparkApplicationAdded, - UpdateFunc: r.onSparkApplicationUpdated, - DeleteFunc: r.onSparkApplicationDeleted, - }) - scheduledSparkApplicationInformer := r.crdInformerFactory.Sparkoperator().V1beta2().ScheduledSparkApplications() - scheduledSparkApplicationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: r.onScheduledSparkApplicationAdded, - UpdateFunc: r.onScheduledSparkApplicationUpdated, - DeleteFunc: r.onScheduledSparkApplicationDeleted, - }) - r.podInformer = r.coreV1InformerFactory.Core().V1().Pods() - r.podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: r.onPodAdded, - UpdateFunc: r.onPodUpdated, - DeleteFunc: r.onPodDeleted, - }) - return r -} - -func (r *ResourceUsageWatcher) GetCurrentResourceUsage(namespace string) ResourceList { - r.currentUsageLock.RLock() - defer r.currentUsageLock.RUnlock() - if resourceUsageInternal, present := r.currentUsageByNamespace[namespace]; present { - return ResourceList{ - cpu: resourceUsageInternal.cpu, - memory: resourceUsageInternal.memory, - } - } - return ResourceList{} -} - -func (r *ResourceUsageWatcher) GetCurrentResourceUsageWithApplication(namespace, kind, name string) (namespaceResources, applicationResources ResourceList) { - r.currentUsageLock.RLock() - defer r.currentUsageLock.RUnlock() - if resourceUsageInternal, present := r.currentUsageByNamespace[namespace]; present { - var applicationResources ResourceList - var namespaceMap map[string]map[string]*ResourceList - switch kind { - case KindSparkApplication: - namespaceMap = r.usageByNamespaceApplication - case KindScheduledSparkApplication: - namespaceMap = r.usageByNamespaceScheduledApplication - } - if applicationMap, present := 
namespaceMap[namespace]; present { - if ar, present := applicationMap[name]; present { - applicationResources = *ar - } - } - currentUsage := *resourceUsageInternal // Creates a copy - currentUsage.cpu.Sub(applicationResources.cpu) - currentUsage.memory.Sub(applicationResources.memory) - return currentUsage, applicationResources - } - return ResourceList{}, ResourceList{} -} - -func (r *ResourceUsageWatcher) unsafeSetResources(namespace, name string, resources ResourceList, resourceMap map[string]map[string]*ResourceList) { - if _, present := resourceMap[namespace]; !present { - resourceMap[namespace] = make(map[string]*ResourceList) - } - // Clear any resource usage currently stored for this object - r.unsafeDeleteResources(namespace, name, resourceMap) - resourceMap[namespace][name] = &resources - if current, present := r.currentUsageByNamespace[namespace]; present { - current.cpu.Add(resources.cpu) - current.memory.Add(resources.memory) - } else { - r.currentUsageByNamespace[namespace] = &ResourceList{ - cpu: resources.cpu, - memory: resources.memory, - } - } -} - -func (r *ResourceUsageWatcher) unsafeDeleteResources(namespace, name string, resourceMap map[string]map[string]*ResourceList) { - if namespaceMap, present := resourceMap[namespace]; present { - if resources, present := namespaceMap[name]; present { - delete(resourceMap[namespace], name) - if current, present := r.currentUsageByNamespace[namespace]; present { - current.cpu.Sub(resources.cpu) - current.memory.Sub(resources.memory) - } - } - } -} - -func (r *ResourceUsageWatcher) setResources(typeName, namespace, name string, resources ResourceList, resourceMap map[string]map[string]*ResourceList) { - glog.V(3).Infof("Updating object %s %s/%s with resources %v", typeName, namespace, name, resources) - r.currentUsageLock.Lock() - r.unsafeSetResources(namespace, name, resources, resourceMap) - r.currentUsageLock.Unlock() - glog.V(3).Infof("Current resources for namespace %s: %v", namespace, r.currentUsageByNamespace[namespace]) -} - -func (r *ResourceUsageWatcher) deleteResources(typeName, namespace, name string, resourceMap map[string]map[string]*ResourceList) { - glog.V(3).Infof("Deleting resources from object %s/%s", namespace, name) - r.currentUsageLock.Lock() - r.unsafeDeleteResources(namespace, name, resourceMap) - r.currentUsageLock.Unlock() - glog.V(3).Infof("Current resources for namespace %s: %v", namespace, r.currentUsageByNamespace[namespace]) -} diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go deleted file mode 100644 index 2984e4641..000000000 --- a/pkg/webhook/webhook.go +++ /dev/null @@ -1,657 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
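
The overhead arithmetic in the deleted `resourceusage/util.go` above (in `memoryRequiredForSparkPod`) is easy to misread, so here is a minimal standalone sketch of the same formula for the common case where no explicit `memoryOverheadFactor` is set. Constant and function names here are mine, not the operator's:

```go
package sketch

import "math"

const (
	mi               = int64(1) << 20
	jvmOverhead      = 0.1      // default factor for Java/Scala apps
	nonJVMOverhead   = 0.4      // default factor for Python/R apps
	minOverheadBytes = 384 * mi // Spark's hard floor, per Constants.scala
)

// podMemoryBytes mirrors the deleted memoryRequiredForSparkPod arithmetic:
// total = (memory + max(factor*memory, 384Mi)) * replicas.
func podMemoryBytes(memoryBytes int64, jvm bool, replicas int64) int64 {
	factor := nonJVMOverhead
	if jvm {
		factor = jvmOverhead
	}
	overhead := int64(math.Max(factor*float64(memoryBytes), float64(minOverheadBytes)))
	return (memoryBytes + overhead) * replicas
}
```

With a 1Gi executor and the JVM default, 0.1 × 1Gi is roughly 102Mi, so the 384Mi floor dominates and each replica is counted as 1408Mi; a non-JVM app at 0.4 × 1Gi (about 410Mi) clears the floor.
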
-*/ - -package webhook - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "io" - "net/http" - "strings" - "time" - - "github.com/golang/glog" - admissionv1 "k8s.io/api/admission/v1" - arv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - - crdapi "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io" - crdv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" - "github.com/kubeflow/spark-operator/pkg/webhook/resourceusage" -) - -const ( - webhookName = "webhook.sparkoperator.k8s.io" - quotaWebhookName = "quotaenforcer.sparkoperator.k8s.io" -) - -var podResource = metav1.GroupVersionResource{ - Group: corev1.SchemeGroupVersion.Group, - Version: corev1.SchemeGroupVersion.Version, - Resource: "pods", -} - -var sparkApplicationResource = metav1.GroupVersionResource{ - Group: crdapi.GroupName, - Version: crdv1beta2.Version, - Resource: "sparkapplications", -} - -var scheduledSparkApplicationResource = metav1.GroupVersionResource{ - Group: crdapi.GroupName, - Version: crdv1beta2.Version, - Resource: "scheduledsparkapplications", -} - -// WebHook encapsulates things needed to run the webhook. -type WebHook struct { - clientset kubernetes.Interface - informerFactory crinformers.SharedInformerFactory - lister crdlisters.SparkApplicationLister - server *http.Server - certProvider *certProvider - serviceRef *arv1.ServiceReference - failurePolicy arv1.FailurePolicyType - selector *metav1.LabelSelector - objectSelector *metav1.LabelSelector - sparkJobNamespace string - deregisterOnExit bool - enableResourceQuotaEnforcement bool - resourceQuotaEnforcer resourceusage.ResourceQuotaEnforcer - coreV1InformerFactory informers.SharedInformerFactory - timeoutSeconds *int32 -} - -// Configuration parsed from command-line flags -type webhookFlags struct { - webhookSecretName string - webhookSecretNamespace string - webhookServiceName string - webhookServiceNamespace string - webhookConfigName string - webhookPort int - webhookFailOnError bool - webhookNamespaceSelector string - webhookObjectSelector string -} - -var userConfig webhookFlags - -func init() { - flag.StringVar(&userConfig.webhookSecretName, "webhook-secret-name", "spark-operator-tls", "The name of the secret that contains the webhook server's TLS certificate and key.") - flag.StringVar(&userConfig.webhookSecretNamespace, "webhook-secret-namespace", "spark-operator", "The namespace of the secret that contains the webhook server's TLS certificate and key.") - flag.StringVar(&userConfig.webhookServiceName, "webhook-svc-name", "spark-webhook", "The name of the Service for the webhook server.") - flag.StringVar(&userConfig.webhookServiceNamespace, "webhook-svc-namespace", "spark-operator", "The namespace of the Service for the webhook server.") - flag.StringVar(&userConfig.webhookConfigName, "webhook-config-name", "spark-webhook-config", "The name of the MutatingWebhookConfiguration object to create.") - flag.IntVar(&userConfig.webhookPort, "webhook-port", 8080, "Service port of the webhook server.") - 
flag.BoolVar(&userConfig.webhookFailOnError, "webhook-fail-on-error", false, "Whether Kubernetes should reject requests when the webhook fails.") - flag.StringVar(&userConfig.webhookNamespaceSelector, "webhook-namespace-selector", "", "The webhook will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Required if webhook-fail-on-error is true.") - flag.StringVar(&userConfig.webhookObjectSelector, "webhook-object-selector", "", "The webhook will only operate on pods with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2).") -} - -// New creates a new WebHook instance. -func New( - clientset kubernetes.Interface, - informerFactory crinformers.SharedInformerFactory, - jobNamespace string, - deregisterOnExit bool, - enableResourceQuotaEnforcement bool, - coreV1InformerFactory informers.SharedInformerFactory, - webhookTimeout *int, -) (*WebHook, error) { - certProvider, err := NewCertProvider( - userConfig.webhookServiceName, - userConfig.webhookServiceNamespace, - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate provider: %v", err) - } - - path := "/webhook" - serviceRef := &arv1.ServiceReference{ - Namespace: userConfig.webhookServiceNamespace, - Name: userConfig.webhookServiceName, - Path: &path, - } - - hook := &WebHook{ - clientset: clientset, - informerFactory: informerFactory, - lister: informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister(), - certProvider: certProvider, - serviceRef: serviceRef, - sparkJobNamespace: jobNamespace, - deregisterOnExit: deregisterOnExit, - failurePolicy: arv1.Ignore, - coreV1InformerFactory: coreV1InformerFactory, - enableResourceQuotaEnforcement: enableResourceQuotaEnforcement, - timeoutSeconds: func(b int32) *int32 { return &b }(int32(*webhookTimeout)), - } - - if userConfig.webhookFailOnError { - hook.failurePolicy = arv1.Fail - } - - if userConfig.webhookNamespaceSelector == "" { - if userConfig.webhookFailOnError { - return nil, fmt.Errorf("webhook-namespace-selector must be set when webhook-fail-on-error is true") - } - } else { - selector, err := parseSelector(userConfig.webhookNamespaceSelector) - if err != nil { - return nil, err - } - hook.selector = selector - } - - if userConfig.webhookObjectSelector != "" { - selector, err := metav1.ParseToLabelSelector(userConfig.webhookObjectSelector) - if err != nil { - return nil, err - } - hook.objectSelector = selector - } - - if enableResourceQuotaEnforcement { - hook.resourceQuotaEnforcer = resourceusage.NewResourceQuotaEnforcer(informerFactory, coreV1InformerFactory) - } - - mux := http.NewServeMux() - mux.HandleFunc(path, hook.serve) - hook.server = &http.Server{ - Addr: fmt.Sprintf(":%d", userConfig.webhookPort), - Handler: mux, - } - - return hook, nil -} - -func parseSelector(selectorArg string) (*metav1.LabelSelector, error) { - selector := &metav1.LabelSelector{ - MatchLabels: make(map[string]string), - } - - selectorStrs := strings.Split(selectorArg, ",") - for _, selectorStr := range selectorStrs { - kv := strings.SplitN(selectorStr, "=", 2) - if len(kv) != 2 || kv[0] == "" || kv[1] == "" { - return nil, fmt.Errorf("webhook selector must be in the form key1=value1,key2=value2") - } - selector.MatchLabels[kv[0]] = kv[1] - } - - return selector, nil -} - -// Start starts the admission webhook server and registers itself to the API server. 
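
Note the asymmetry in the deleted selector handling: `--webhook-namespace-selector` goes through the hand-rolled `parseSelector` (equality terms only), while `--webhook-object-selector` is handed to `metav1.ParseToLabelSelector`, which also accepts set-based expressions. A small self-contained sketch of the latter, assuming only apimachinery:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The same expression the chart passes for the object selector.
	sel, err := metav1.ParseToLabelSelector("spark-role in (driver,executor)")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", sel.MatchExpressions[0])
	// Prints roughly: {Key:spark-role Operator:in Values:[driver executor]}
}
```
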
-func (wh *WebHook) Start(stopCh <-chan struct{}) error { - wh.updateSecret(userConfig.webhookSecretName, userConfig.webhookSecretNamespace) - - tlsCfg, err := wh.certProvider.TLSConfig() - if err != nil { - return fmt.Errorf("failed to get TLS config: %v", err) - } - wh.server.TLSConfig = tlsCfg - - if wh.enableResourceQuotaEnforcement { - err := wh.resourceQuotaEnforcer.WaitForCacheSync(stopCh) - if err != nil { - return err - } - } - - go func() { - glog.Info("Starting the Spark admission webhook server") - if err := wh.server.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { - glog.Errorf("error while serving the Spark admission webhook: %v\n", err) - } - }() - - return wh.selfRegistration(userConfig.webhookConfigName) -} - -// Stop deregisters itself with the API server and stops the admission webhook server. -func (wh *WebHook) Stop() error { - // Do not deregister if strict error handling is enabled; pod deletions are common, and we - // don't want to create windows where pods can be created without being subject to the webhook. - if wh.failurePolicy != arv1.Fail { - if err := wh.selfDeregistration(userConfig.webhookConfigName); err != nil { - return err - } - glog.Infof("Webhook %s deregistered", userConfig.webhookConfigName) - } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - glog.Info("Stopping the Spark pod admission webhook server") - return wh.server.Shutdown(ctx) -} - -func (wh *WebHook) serve(w http.ResponseWriter, r *http.Request) { - glog.V(2).Info("Serving admission request") - var body []byte - if r.Body != nil { - data, err := io.ReadAll(r.Body) - if err != nil { - internalError(w, fmt.Errorf("failed to read the request body")) - return - } - body = data - } - - if len(body) == 0 { - denyRequest(w, "empty request body", http.StatusBadRequest) - return - } - - contentType := r.Header.Get("Content-Type") - if contentType != "application/json" { - denyRequest(w, "invalid Content-Type, expected `application/json`", http.StatusUnsupportedMediaType) - return - } - - review := &admissionv1.AdmissionReview{} - deserializer := codecs.UniversalDeserializer() - if _, _, err := deserializer.Decode(body, nil, review); err != nil { - internalError(w, err) - return - } - var whErr error - var reviewResponse *admissionv1.AdmissionResponse - switch review.Request.Resource { - case podResource: - reviewResponse, whErr = mutatePods(review, wh.lister, wh.sparkJobNamespace) - case sparkApplicationResource: - if !wh.enableResourceQuotaEnforcement { - unexpectedResourceType(w, review.Request.Resource.String()) - return - } - reviewResponse, whErr = admitSparkApplications(review, wh.resourceQuotaEnforcer) - case scheduledSparkApplicationResource: - if !wh.enableResourceQuotaEnforcement { - unexpectedResourceType(w, review.Request.Resource.String()) - return - } - reviewResponse, whErr = admitScheduledSparkApplications(review, wh.resourceQuotaEnforcer) - default: - unexpectedResourceType(w, review.Request.Resource.String()) - return - } - if whErr != nil { - internalError(w, whErr) - return - } - - response := admissionv1.AdmissionReview{ - TypeMeta: metav1.TypeMeta{APIVersion: "admission.k8s.io/v1", Kind: "AdmissionReview"}, - Response: reviewResponse, - } - - if reviewResponse != nil { - if review.Request != nil { - response.Response.UID = review.Request.UID - } - } - - resp, err := json.Marshal(response) - if err != nil { - internalError(w, err) - return - } - if _, err := w.Write(resp); err != nil { - internalError(w, err) - 
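
One contract in `serve` that is easy to miss: the reply must itself be an `AdmissionReview` whose `Response.UID` echoes `Request.UID`, or the API server discards the response. A hedged sketch of just that envelope; `respond` is a hypothetical helper, not part of the deleted code:

```go
package sketch

import (
	admissionv1 "k8s.io/api/admission/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// respond builds the minimal admission reply: explicit TypeMeta, the caller's
// UID echoed back, and a Status carrying the denial reason when not allowed.
func respond(review *admissionv1.AdmissionReview, allowed bool, msg string) admissionv1.AdmissionReview {
	resp := &admissionv1.AdmissionResponse{UID: review.Request.UID, Allowed: allowed}
	if !allowed {
		resp.Result = &metav1.Status{Message: msg, Code: 400}
	}
	return admissionv1.AdmissionReview{
		TypeMeta: metav1.TypeMeta{APIVersion: "admission.k8s.io/v1", Kind: "AdmissionReview"},
		Response: resp,
	}
}
```
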
} -} - -func (wh *WebHook) updateSecret(name, namespace string) error { - secret, err := wh.clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("failed to get webhook secret: %v", err) - } - - caKey, err := wh.certProvider.CAKey() - if err != nil { - return fmt.Errorf("failed to get CA key: %v", err) - } - - caCert, err := wh.certProvider.CACert() - if err != nil { - return fmt.Errorf("failed to get CA cert: %v", err) - } - - serverKey, err := wh.certProvider.ServerKey() - if err != nil { - return fmt.Errorf("failed to get server key: %v", err) - } - - serverCert, err := wh.certProvider.ServerCert() - if err != nil { - return fmt.Errorf("failed to get server cert: %v", err) - } - - newSecret := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Data: map[string][]byte{ - "ca-key.pem": caKey, - "ca-cert.pem": caCert, - "server-key.pem": serverKey, - "server-cert.pem": serverCert, - }, - } - - if !equality.Semantic.DeepEqual(newSecret, secret) { - secret.Data = newSecret.Data - _, err := wh.clientset.CoreV1().Secrets(namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("failed to update webhook secret: %v", err) - } - } - - glog.Infof("Updated webhook secret %s/%s", namespace, name) - return nil -} - -func unexpectedResourceType(w http.ResponseWriter, kind string) { - denyRequest(w, fmt.Sprintf("unexpected resource type: %v", kind), http.StatusUnsupportedMediaType) -} - -func internalError(w http.ResponseWriter, err error) { - glog.Errorf("internal error: %v", err) - denyRequest(w, err.Error(), 500) -} - -func denyRequest(w http.ResponseWriter, reason string, code int) { - response := &admissionv1.AdmissionReview{ - Response: &admissionv1.AdmissionResponse{ - Allowed: false, - Result: &metav1.Status{ - Code: int32(code), - Message: reason, - }, - }, - } - resp, err := json.Marshal(response) - if err != nil { - glog.Error(err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(code) - _, err = w.Write(resp) - if err != nil { - glog.Errorf("failed to write response body: %v", err) - } -} - -func (wh *WebHook) selfRegistration(webhookConfigName string) error { - caBundle, err := wh.certProvider.CACert() - if err != nil { - return fmt.Errorf("failed to get CA certificate: %v", err) - } - - mwcClient := wh.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations() - vwcClient := wh.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations() - - mutatingRules := []arv1.RuleWithOperations{ - { - Operations: []arv1.OperationType{arv1.Create}, - Rule: arv1.Rule{ - APIGroups: []string{""}, - APIVersions: []string{"v1"}, - Resources: []string{"pods"}, - }, - }, - } - - validatingRules := []arv1.RuleWithOperations{ - { - Operations: []arv1.OperationType{arv1.Create, arv1.Update}, - Rule: arv1.Rule{ - APIGroups: []string{crdapi.GroupName}, - APIVersions: []string{crdv1beta2.Version}, - Resources: []string{sparkApplicationResource.Resource, scheduledSparkApplicationResource.Resource}, - }, - }, - } - - sideEffect := arv1.SideEffectClassNoneOnDryRun - - mutatingWebhook := arv1.MutatingWebhook{ - Name: webhookName, - Rules: mutatingRules, - ClientConfig: arv1.WebhookClientConfig{ - Service: wh.serviceRef, - CABundle: caBundle, - }, - FailurePolicy: &wh.failurePolicy, - NamespaceSelector: wh.selector, - ObjectSelector: wh.objectSelector, - TimeoutSeconds: wh.timeoutSeconds, - SideEffects: 
&sideEffect, - AdmissionReviewVersions: []string{"v1"}, - } - - validatingWebhook := arv1.ValidatingWebhook{ - Name: quotaWebhookName, - Rules: validatingRules, - ClientConfig: arv1.WebhookClientConfig{ - Service: wh.serviceRef, - CABundle: caBundle, - }, - FailurePolicy: &wh.failurePolicy, - NamespaceSelector: wh.selector, - ObjectSelector: wh.objectSelector, - TimeoutSeconds: wh.timeoutSeconds, - SideEffects: &sideEffect, - AdmissionReviewVersions: []string{"v1"}, - } - - mutatingWebhooks := []arv1.MutatingWebhook{mutatingWebhook} - validatingWebhooks := []arv1.ValidatingWebhook{validatingWebhook} - - mutatingExisting, mutatingGetErr := mwcClient.Get(context.TODO(), webhookConfigName, metav1.GetOptions{}) - if mutatingGetErr != nil { - if !errors.IsNotFound(mutatingGetErr) { - return mutatingGetErr - } - // Create case. - glog.Info("Creating a MutatingWebhookConfiguration for the Spark pod admission webhook") - webhookConfig := &arv1.MutatingWebhookConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: webhookConfigName, - }, - Webhooks: mutatingWebhooks, - } - if _, err := mwcClient.Create(context.TODO(), webhookConfig, metav1.CreateOptions{}); err != nil { - return err - } - } else { - // Update case. - glog.Info("Updating existing MutatingWebhookConfiguration for the Spark pod admission webhook") - if !equality.Semantic.DeepEqual(mutatingWebhooks, mutatingExisting.Webhooks) { - mutatingExisting.Webhooks = mutatingWebhooks - if _, err := mwcClient.Update(context.TODO(), mutatingExisting, metav1.UpdateOptions{}); err != nil { - return err - } - } - } - - if wh.enableResourceQuotaEnforcement { - validatingExisting, validatingGetErr := vwcClient.Get(context.TODO(), webhookConfigName, metav1.GetOptions{}) - if validatingGetErr != nil { - if !errors.IsNotFound(validatingGetErr) { - return validatingGetErr - } - // Create case. - glog.Info("Creating a ValidatingWebhookConfiguration for the SparkApplication resource quota enforcement webhook") - webhookConfig := &arv1.ValidatingWebhookConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: webhookConfigName, - }, - Webhooks: validatingWebhooks, - } - if _, err := vwcClient.Create(context.TODO(), webhookConfig, metav1.CreateOptions{}); err != nil { - return err - } - - } else { - // Update case. 
- glog.Info("Updating existing ValidatingWebhookConfiguration for the SparkApplication resource quota enforcement webhook") - if !equality.Semantic.DeepEqual(validatingWebhooks, validatingExisting.Webhooks) { - validatingExisting.Webhooks = validatingWebhooks - if _, err := vwcClient.Update(context.TODO(), validatingExisting, metav1.UpdateOptions{}); err != nil { - return err - } - } - } - } - return nil -} - -func (wh *WebHook) selfDeregistration(webhookConfigName string) error { - mutatingConfigs := wh.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations() - validatingConfigs := wh.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations() - if wh.enableResourceQuotaEnforcement { - err := validatingConfigs.Delete(context.TODO(), webhookConfigName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) - if err != nil { - return err - } - } - return mutatingConfigs.Delete(context.TODO(), webhookConfigName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) -} - -func admitSparkApplications(review *admissionv1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1.AdmissionResponse, error) { - if review.Request.Resource != sparkApplicationResource { - return nil, fmt.Errorf("expected resource to be %s, got %s", sparkApplicationResource, review.Request.Resource) - } - - raw := review.Request.Object.Raw - app := &crdv1beta2.SparkApplication{} - if err := json.Unmarshal(raw, app); err != nil { - return nil, fmt.Errorf("failed to unmarshal a SparkApplication from the raw data in the admission request: %v", err) - } - - reason, err := enforcer.AdmitSparkApplication(*app) - if err != nil { - return nil, fmt.Errorf("resource quota enforcement failed for SparkApplication: %v", err) - } - response := &admissionv1.AdmissionResponse{Allowed: reason == ""} - if reason != "" { - response.Result = &metav1.Status{ - Message: reason, - Code: 400, - } - } - return response, nil -} - -func admitScheduledSparkApplications(review *admissionv1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1.AdmissionResponse, error) { - if review.Request.Resource != scheduledSparkApplicationResource { - return nil, fmt.Errorf("expected resource to be %s, got %s", scheduledSparkApplicationResource, review.Request.Resource) - } - - raw := review.Request.Object.Raw - app := &crdv1beta2.ScheduledSparkApplication{} - if err := json.Unmarshal(raw, app); err != nil { - return nil, fmt.Errorf("failed to unmarshal a ScheduledSparkApplication from the raw data in the admission request: %v", err) - } - - response := &admissionv1.AdmissionResponse{Allowed: true} - reason, err := enforcer.AdmitScheduledSparkApplication(*app) - if err != nil { - return nil, fmt.Errorf("resource quota enforcement failed for ScheduledSparkApplication: %v", err) - } else if reason != "" { - response.Allowed = false - response.Result = &metav1.Status{ - Message: reason, - Code: 400, - } - } - return response, nil -} - -func mutatePods( - review *admissionv1.AdmissionReview, - lister crdlisters.SparkApplicationLister, - sparkJobNs string, -) (*admissionv1.AdmissionResponse, error) { - raw := review.Request.Object.Raw - pod := &corev1.Pod{} - if err := json.Unmarshal(raw, pod); err != nil { - return nil, fmt.Errorf("failed to unmarshal a Pod from the raw data in the admission request: %v", err) - } - - response := &admissionv1.AdmissionResponse{Allowed: true} - - if !isSparkPod(pod) || !inSparkJobNamespace(review.Request.Namespace, sparkJobNs) { - glog.V(2).Infof("Pod %s in namespace 
%s is not subject to mutation", pod.GetObjectMeta().GetName(), review.Request.Namespace) - return response, nil - } - - // Try getting the SparkApplication name from the annotation for that. - appName := pod.Labels[config.SparkAppNameLabel] - if appName == "" { - return response, nil - } - app, err := lister.SparkApplications(review.Request.Namespace).Get(appName) - if err != nil { - return nil, fmt.Errorf("failed to get SparkApplication %s/%s: %v", review.Request.Namespace, appName, err) - } - - patchOps := patchSparkPod(pod, app) - if len(patchOps) > 0 { - glog.V(2).Infof("Pod %s in namespace %s is subject to mutation", pod.GetObjectMeta().GetName(), review.Request.Namespace) - patchBytes, err := json.Marshal(patchOps) - if err != nil { - return nil, fmt.Errorf("failed to marshal patch operations %v: %v", patchOps, err) - } - glog.V(3).Infof("Pod %s mutation/patch result %s", pod.GetObjectMeta().GetName(), patchBytes) - response.Patch = patchBytes - patchType := admissionv1.PatchTypeJSONPatch - response.PatchType = &patchType - } - - return response, nil -} - -func inSparkJobNamespace(podNs string, sparkJobNamespace string) bool { - if sparkJobNamespace == corev1.NamespaceAll { - return true - } - return podNs == sparkJobNamespace -} - -func isSparkPod(pod *corev1.Pod) bool { - return util.IsLaunchedBySparkOperator(pod) && (util.IsDriverPod(pod) || util.IsExecutorPod(pod)) -} - -func int64ptr(n int64) *int64 { - return &n -} diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go deleted file mode 100644 index 6f2e2f088..000000000 --- a/pkg/webhook/webhook_test.go +++ /dev/null @@ -1,310 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "context" - "encoding/json" - "testing" - "time" - - "github.com/stretchr/testify/assert" - admissionv1 "k8s.io/api/admission/v1" - arv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" - gotest "k8s.io/client-go/testing" - - spov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - "github.com/kubeflow/spark-operator/pkg/config" -) - -func TestMutatePod(t *testing.T) { - crdClient := crdclientfake.NewSimpleClientset() - informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 0*time.Second) - informer := informerFactory.Sparkoperator().V1beta2().SparkApplications() - lister := informer.Lister() - - pod1 := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-driver", - Namespace: "default", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: config.SparkDriverContainerName, - Image: "spark-driver:latest", - }, - }, - }, - } - - // 1. 
Testing processing non-Spark pod. - podBytes, err := serializePod(pod1) - if err != nil { - t.Error(err) - } - review := &admissionv1.AdmissionReview{ - Request: &admissionv1.AdmissionRequest{ - Resource: metav1.GroupVersionResource{ - Group: corev1.SchemeGroupVersion.Group, - Version: corev1.SchemeGroupVersion.Version, - Resource: "pods", - }, - Object: runtime.RawExtension{ - Raw: podBytes, - }, - Namespace: "default", - }, - } - response, _ := mutatePods(review, lister, "default") - assert.True(t, response.Allowed) - - // 2. Test processing Spark pod with only one patch: adding an OwnerReference. - app1 := &spov1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-app1", - Namespace: "default", - }, - } - crdClient.SparkoperatorV1beta2().SparkApplications(app1.Namespace).Create(context.TODO(), app1, metav1.CreateOptions{}) - informer.Informer().GetIndexer().Add(app1) - pod1.Labels = map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", - config.SparkAppNameLabel: app1.Name, - } - podBytes, err = serializePod(pod1) - if err != nil { - t.Error(err) - } - review.Request.Object.Raw = podBytes - response, _ = mutatePods(review, lister, "default") - assert.True(t, response.Allowed) - assert.Equal(t, admissionv1.PatchTypeJSONPatch, *response.PatchType) - assert.True(t, len(response.Patch) > 0) - - // 3. Test processing Spark pod with patches. - var user int64 = 1000 - app2 := &spov1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-app2", - Namespace: "default", - }, - Spec: spov1beta2.SparkApplicationSpec{ - Volumes: []corev1.Volume{ - { - Name: "spark", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/spark", - }, - }, - }, - { - Name: "unused", // Expect this to not be added to the driver. 
- VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - }, - Driver: spov1beta2.DriverSpec{ - SparkPodSpec: spov1beta2.SparkPodSpec{ - VolumeMounts: []corev1.VolumeMount{ - { - Name: "spark", - MountPath: "/mnt/spark", - }, - }, - Affinity: &corev1.Affinity{ - PodAffinity: &corev1.PodAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ - { - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{config.SparkRoleLabel: config.SparkDriverRole}, - }, - TopologyKey: "kubernetes.io/hostname", - }, - }, - }, - }, - Tolerations: []corev1.Toleration{ - { - Key: "Key", - Operator: "Equal", - Value: "Value", - Effect: "NoEffect", - }, - }, - SecurityContext: &corev1.SecurityContext{ - RunAsUser: &user, - }, - }, - }, - }, - } - crdClient.SparkoperatorV1beta2().SparkApplications(app2.Namespace).Update(context.TODO(), app2, metav1.UpdateOptions{}) - informer.Informer().GetIndexer().Add(app2) - - pod1.Labels[config.SparkAppNameLabel] = app2.Name - podBytes, err = serializePod(pod1) - if err != nil { - t.Error(err) - } - review.Request.Object.Raw = podBytes - response, _ = mutatePods(review, lister, "default") - assert.True(t, response.Allowed) - assert.Equal(t, admissionv1.PatchTypeJSONPatch, *response.PatchType) - assert.True(t, len(response.Patch) > 0) - var patchOps []*patchOperation - json.Unmarshal(response.Patch, &patchOps) - assert.Equal(t, 6, len(patchOps)) -} - -func serializePod(pod *corev1.Pod) ([]byte, error) { - return json.Marshal(pod) -} - -func TestSelfRegistrationWithObjectSelector(t *testing.T) { - clientset := fake.NewSimpleClientset() - informerFactory := crdinformers.NewSharedInformerFactory(nil, 0) - coreV1InformerFactory := informers.NewSharedInformerFactory(nil, 0) - - // Setup userConfig with object selector - userConfig.webhookObjectSelector = "spark-role in (driver,executor)" - webhookTimeout := 30 - - // Create webhook instance - webhook, err := New(clientset, informerFactory, "default", false, false, coreV1InformerFactory, &webhookTimeout) - assert.NoError(t, err) - - // Mock the clientset's Create function to capture the MutatingWebhookConfiguration object - var createdWebhookConfig *arv1.MutatingWebhookConfiguration - clientset.PrependReactor("create", "mutatingwebhookconfigurations", func(action gotest.Action) (handled bool, ret runtime.Object, err error) { - createAction := action.(gotest.CreateAction) - createdWebhookConfig = createAction.GetObject().(*arv1.MutatingWebhookConfiguration) - return true, createdWebhookConfig, nil - }) - - // Call the selfRegistration method - err = webhook.selfRegistration("test-webhook-config") - assert.NoError(t, err) - - // Verify the MutatingWebhookConfiguration was created with the expected object selector - assert.NotNil(t, createdWebhookConfig, "MutatingWebhookConfiguration should have been created") - - expectedSelector := &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "spark-role", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"driver", "executor"}, - }, - }, - } - actualSelector := createdWebhookConfig.Webhooks[0].ObjectSelector - - assert.True(t, labelSelectorsEqual(expectedSelector, actualSelector), "ObjectSelectors should be equal") -} - -func labelSelectorsEqual(expected, actual *metav1.LabelSelector) bool { - if expected == nil || actual == nil { - return expected == nil && actual == nil - } - - if len(expected.MatchLabels) != len(actual.MatchLabels) { - return false - } - - for k, v := 
range expected.MatchLabels { - if actual.MatchLabels[k] != v { - return false - } - } - - if len(expected.MatchExpressions) != len(actual.MatchExpressions) { - return false - } - - for i, expr := range expected.MatchExpressions { - if expr.Key != actual.MatchExpressions[i].Key || - expr.Operator != actual.MatchExpressions[i].Operator || - !equalStringSlices(expr.Values, actual.MatchExpressions[i].Values) { - return false - } - } - - return true -} - -func equalStringSlices(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} - -func testSelector(input string, expected *metav1.LabelSelector, t *testing.T) { - selector, err := parseSelector(input) - - if expected == nil { - if err == nil { - t.Errorf("Expected error parsing '%s', but got %v", input, selector) - } - } else { - if err != nil { - t.Errorf("Parsing '%s' failed: %v", input, err) - return - } - if !equality.Semantic.DeepEqual(*selector, *expected) { - t.Errorf("Parsing '%s' failed: expected %v, got %v", input, expected, selector) - } - } -} - -func TestNamespaceSelectorParsing(t *testing.T) { - testSelector("invalid", nil, t) - testSelector("=invalid", nil, t) - testSelector("invalid=", nil, t) - testSelector("in,val,id", nil, t) - testSelector(",inval=id,inval2=id2", nil, t) - testSelector("inval=id,inval2=id2,", nil, t) - testSelector("val=id,invalid", nil, t) - testSelector("val=id", &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "val": "id", - }, - }, t) - testSelector("val=id,val2=id2", &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "val": "id", - "val2": "id2", - }, - }, t) -} diff --git a/sparkctl/README.md b/sparkctl/README.md index 70bd03535..188006e13 100644 --- a/sparkctl/README.md +++ b/sparkctl/README.md @@ -5,14 +5,15 @@ To build `sparkctl`, make sure you followed build steps [here](https://github.com/kubeflow/spark-operator/blob/master/docs/developer-guide.md#build-the-operator) and have all the dependencies, then run the following command from within `sparkctl/`: ```bash -$ go build -o sparkctl +go build -o sparkctl ``` ## Flags The following global flags are available for all the sub commands: + * `--namespace`: the Kubernetes namespace of the `SparkApplication`(s). Defaults to `default`. -* `--kubeconfig`: the path to the file storing configuration for accessing the Kubernetes API server. Defaults to +* `--kubeconfig`: the path to the file storing configuration for accessing the Kubernetes API server. Defaults to `$HOME/.kube/config` ## Available Commands @@ -22,18 +23,21 @@ The following global flags are available for all the sub commands: `create` is a sub command of `sparkctl` for creating a `SparkApplication` object. There are two ways to create a `SparkApplication` object. One is parsing and creating a `SparkApplication` object in namespace specified by `--namespace` the from a given YAML file. In this way, `create` parses the YAML file, and sends the parsed `SparkApplication` object parsed to the Kubernetes API server. Usage of this way looks like the following: Usage: + ```bash -$ sparkctl create +sparkctl create ``` + The other way is creating a `SparkApplication` object from a named `ScheduledSparkApplication` to manually force a run of the `ScheduledSparkApplication`. 
Usage of this way looks like the following: Usage: + ```bash -$ sparkctl create --from +sparkctl create --from ``` -The `create` command also supports shipping local Hadoop configuration files into the driver and executor pods. Specifically, it detects local Hadoop configuration files located at the path specified by the -environment variable `HADOOP_CONF_DIR`, create a Kubernetes `ConfigMap` from the files, and adds the `ConfigMap` to the `SparkApplication` object so it gets mounted into the driver and executor pods by the operator. The environment variable `HADOOP_CONF_DIR` is also set in the driver and executor containers. +The `create` command also supports shipping local Hadoop configuration files into the driver and executor pods. Specifically, it detects local Hadoop configuration files located at the path specified by the +environment variable `HADOOP_CONF_DIR`, create a Kubernetes `ConfigMap` from the files, and adds the `ConfigMap` to the `SparkApplication` object so it gets mounted into the driver and executor pods by the operator. The environment variable `HADOOP_CONF_DIR` is also set in the driver and executor containers. #### Staging local dependencies @@ -41,26 +45,27 @@ The `create` command also supports staging local application dependencies, thoug ##### Uploading to GCS -For uploading to GCS, the value should be in the form of `gs://`. The bucket must exist and uploading fails if otherwise. The local dependencies will be uploaded to the path -`spark-app-dependencies//` in the given bucket. It replaces the file path of each local dependency with the URI of the remote copy in the parsed `SparkApplication` object if uploading is successful. +For uploading to GCS, the value should be in the form of `gs://`. The bucket must exist and uploading fails if otherwise. The local dependencies will be uploaded to the path +`spark-app-dependencies//` in the given bucket. It replaces the file path of each local dependency with the URI of the remote copy in the parsed `SparkApplication` object if uploading is successful. -Note that uploading to GCS requires a GCP service account with the necessary IAM permission to use the GCP project specified by service account JSON key file (`serviceusage.services.use`) and the permission to create GCS objects (`storage.object.create`). -The service account JSON key file must be locally available and be pointed to by the environment variable -`GOOGLE_APPLICATION_CREDENTIALS`. For more information on IAM authentication, please check +Note that uploading to GCS requires a GCP service account with the necessary IAM permission to use the GCP project specified by service account JSON key file (`serviceusage.services.use`) and the permission to create GCS objects (`storage.object.create`). +The service account JSON key file must be locally available and be pointed to by the environment variable +`GOOGLE_APPLICATION_CREDENTIALS`. For more information on IAM authentication, please check [Getting Started with Authentication](https://cloud.google.com/docs/authentication/getting-started). Usage: + ```bash -$ export GOOGLE_APPLICATION_CREDENTIALS="[PATH]/[FILE_NAME].json" -$ sparkctl create --upload-to gs:// +export GOOGLE_APPLICATION_CREDENTIALS="[PATH]/[FILE_NAME].json" +sparkctl create --upload-to gs:// ``` -By default, the uploaded dependencies are not made publicly accessible and are referenced using URIs in the form of `gs://bucket/path/to/file`. Such dependencies are referenced through URIs of the form `gs://bucket/path/to/file`. 
To download the dependencies from GCS, a custom-built Spark init-container with the [GCS connector](https://cloud.google.com/dataproc/docs/concepts/connectors/cloud-storage) installed and necessary Hadoop configuration properties specified is needed. An example Docker file of such an init-container can be found [here](https://gist.github.com/liyinan926/f9e81f7b54d94c05171a663345eb58bf). +By default, the uploaded dependencies are not made publicly accessible and are referenced using URIs in the form of `gs://bucket/path/to/file`. Such dependencies are referenced through URIs of the form `gs://bucket/path/to/file`. To download the dependencies from GCS, a custom-built Spark init-container with the [GCS connector](https://cloud.google.com/dataproc/docs/concepts/connectors/cloud-storage) installed and necessary Hadoop configuration properties specified is needed. An example Docker file of such an init-container can be found [here](https://gist.github.com/liyinan926/f9e81f7b54d94c05171a663345eb58bf). If you want to make uploaded dependencies publicly available so they can be downloaded by the built-in init-container, simply add `--public` to the `create` command, as the following example shows: ```bash -$ sparkctl create --upload-to gs:// --public +sparkctl create --upload-to gs:// --public ``` Publicly available files are referenced through URIs of the form `https://storage.googleapis.com/bucket/path/to/file`. @@ -71,67 +76,71 @@ For uploading to S3, the value should be in the form of `s3://`. The buc `spark-app-dependencies//` in the given bucket. It replaces the file path of each local dependency with the URI of the remote copy in the parsed `SparkApplication` object if uploading is successful. Note that uploading to S3 with [AWS SDK](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) requires credentials to be specified. For GCP, the S3 Interoperability credentials can be retrieved as described [here](https://cloud.google.com/storage/docs/migrating#keys). -SDK uses the default credential provider chain to find AWS credentials. +SDK uses the default credential provider chain to find AWS credentials. The SDK uses the first provider in the chain that returns credentials without an error. The default provider chain looks for credentials in the following order: -- Environment variables +* Environment variables + ``` AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY ``` + - Shared credentials file (.aws/credentials) For more information about AWS SDK authentication, please check [Specifying Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials). Usage: + ```bash -$ export AWS_ACCESS_KEY_ID=[KEY] -$ export AWS_SECRET_ACCESS_KEY=[SECRET] -$ sparkctl create --upload-to s3:// +export AWS_ACCESS_KEY_ID=[KEY] +export AWS_SECRET_ACCESS_KEY=[SECRET] +sparkctl create --upload-to s3:// ``` By default, the uploaded dependencies are not made publicly accessible and are referenced using URIs in the form of `s3a://bucket/path/to/file`. 
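
The path rewrite described above can be pictured with a short sketch. The `spark-app-dependencies/<namespace>/<app name>/` layout is the one this README describes, and `remoteURI` is a hypothetical helper for illustration, not the actual function in `sparkctl/cmd/create.go`:

```go
package sketch

import (
	"fmt"
	"path/filepath"
)

// remoteURI illustrates the rewrite: each local dependency path in the parsed
// SparkApplication is replaced by the URI of its uploaded copy under the
// conventional spark-app-dependencies prefix. Layout assumed from the README.
func remoteURI(scheme, bucket, namespace, appName, localPath string) string {
	return fmt.Sprintf("%s://%s/spark-app-dependencies/%s/%s/%s",
		scheme, bucket, namespace, appName, filepath.Base(localPath))
}

// remoteURI("gs", "my-bucket", "default", "spark-pi", "/tmp/app.jar")
//   => "gs://my-bucket/spark-app-dependencies/default/spark-pi/app.jar"
```
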
To download the dependencies from S3, a custom-built Spark Docker image with the required jars for `S3A Connector` (`hadoop-aws-2.7.6.jar`, `aws-java-sdk-1.7.6.jar` for Spark build with Hadoop2.7 profile, or `hadoop-aws-3.1.0.jar`, `aws-java-sdk-bundle-1.11.271.jar` for Hadoop3.1) need to be available in the classpath, and `spark-default.conf` with the AWS keys and the S3A FileSystemClass needs to be set (you can also use `spec.hadoopConf` in the SparkApplication YAML): -``` +```properties spark.hadoop.fs.s3a.endpoint https://storage.googleapis.com spark.hadoop.fs.s3a.access.key [KEY] spark.hadoop.fs.s3a.secret.key [SECRET] spark.hadoop.fs.s3a.impl org.apache.hadoop.fs.s3a.S3AFileSystem ``` -NOTE: In Spark 2.3 init-containers are used for downloading remote application dependencies. In future versions, init-containers are removed. -It is recommended to use Apache Spark 2.4 for staging local dependencies with `s3`, which currently requires building a custom Docker image from the Spark master branch. Additionally, since Spark 2.4.0 +NOTE: In Spark 2.3 init-containers are used for downloading remote application dependencies. In future versions, init-containers are removed. +It is recommended to use Apache Spark 2.4 for staging local dependencies with `s3`, which currently requires building a custom Docker image from the Spark master branch. Additionally, since Spark 2.4.0 there are two available build profiles, Hadoop2.7 and Hadoop3.1. For use of Spark with `S3A Connector`, Hadoop3.1 profile is recommended as this allows to use newer version of `aws-java-sdk-bundle`. -If you want to use custom S3 endpoint or region, add `--upload-to-endpoint` and `--upload-to-region`: +If you want to use custom S3 endpoint or region, add `--upload-to-endpoint` and `--upload-to-region`: ```bash -$ sparkctl create --upload-to-endpoint https:// --upload-to-region --upload-to s3:// +sparkctl create --upload-to-endpoint https:// --upload-to-region --upload-to s3:// ``` -If you want to force path style URLs for S3 objects add `--s3-force-path-style`: +If you want to force path style URLs for S3 objects add `--s3-force-path-style`: ```bash -$ sparkctl create --s3-force-path-style +sparkctl create --s3-force-path-style ``` If you want to make uploaded dependencies publicly available, add `--public` to the `create` command, as the following example shows: ```bash -$ sparkctl create --upload-to s3:// --public +sparkctl create --upload-to s3:// --public ``` Publicly available files are referenced through URIs in the default form `https:///bucket/path/to/file`. ### List -`list` is a sub command of `sparkctl` for listing `SparkApplication` objects in the namespace specified by +`list` is a sub command of `sparkctl` for listing `SparkApplication` objects in the namespace specified by `--namespace`. Usage: + ```bash -$ sparkctl list +sparkctl list ``` ### Status @@ -139,21 +148,23 @@ $ sparkctl list `status` is a sub command of `sparkctl` for checking and printing the status of a `SparkApplication` in the namespace specified by `--namespace`. Usage: + ```bash -$ sparkctl status +sparkctl status ``` ### Event -`event` is a sub command of `sparkctl` for listing `SparkApplication` events in the namespace -specified by `--namespace`. +`event` is a sub command of `sparkctl` for listing `SparkApplication` events in the namespace +specified by `--namespace`. -The `event` command also supports streaming the events with the `--follow` or `-f` flag. 
+The `event` command also supports streaming the events with the `--follow` or `-f` flag. The command will display events since last creation of the `SparkApplication` for the specific `name`, and continues to stream events even if `ResourceVersion` changes. Usage: + ```bash -$ sparkctl event [-f] +sparkctl event [-f] ``` ### Log @@ -163,8 +174,9 @@ $ sparkctl event [-f] The `log` command also supports streaming the driver or executor logs with the `--follow` or `-f` flag. It works in the same way as `kubectl logs -f`, i.e., it streams logs until no more logs are available. Usage: + ```bash -$ sparkctl log [-e ] [-f] +sparkctl log [-e ] [-f] ``` ### Delete @@ -172,17 +184,19 @@ $ sparkctl log [-e ] [-f] `delete` is a sub command of `sparkctl` for deleting a `SparkApplication` with the given name in the namespace specified by `--namespace`. Usage: + ```bash -$ sparkctl delete +sparkctl delete ``` ### Forward -`forward` is a sub command of `sparkctl` for doing port forwarding from a local port to the Spark web UI port on the driver. It allows the Spark web UI served in the driver pod to be accessed locally. By default, it forwards from local port `4040` to remote port `4040`, which is the default Spark web UI port. Users can specify different local port and remote port using the flags `--local-port` and `--remote-port`, respectively. +`forward` is a sub command of `sparkctl` for doing port forwarding from a local port to the Spark web UI port on the driver. It allows the Spark web UI served in the driver pod to be accessed locally. By default, it forwards from local port `4040` to remote port `4040`, which is the default Spark web UI port. Users can specify different local port and remote port using the flags `--local-port` and `--remote-port`, respectively. Usage: + ```bash -$ sparkctl forward [--local-port ] [--remote-port ] +sparkctl forward [--local-port ] [--remote-port ] ``` Once port forwarding starts, users can open `127.0.0.1:` or `localhost:` in a browser to access the Spark web UI. Forwarding continues until it is interrupted or the driver pod terminates. diff --git a/sparkctl/build.sh b/sparkctl/build.sh index 669a86ce7..f4cca33ae 100755 --- a/sparkctl/build.sh +++ b/sparkctl/build.sh @@ -13,15 +13,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
diff --git a/sparkctl/build.sh b/sparkctl/build.sh
index 669a86ce7..f4cca33ae 100755
--- a/sparkctl/build.sh
+++ b/sparkctl/build.sh
@@ -13,15 +13,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-SCRIPT=`basename ${BASH_SOURCE[0]}`
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )"
+
+SCRIPT=$(basename ${BASH_SOURCE[0]})
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)"

set -e

platforms=("linux:amd64" "darwin:amd64")

-for platform in "${platforms[@]}"
-do
-  GOOS="${platform%%:*}"
-  GOARCH="${platform#*:}"
-  echo $GOOS
-  echo $GOARCH
-  CGO_ENABLED=0 GOOS=$GOOS GOARCH=$GOARCH go build -buildvcs=false -o sparkctl-${GOOS}-${GOARCH}
+for platform in "${platforms[@]}"; do
+  GOOS="${platform%%:*}"
+  GOARCH="${platform#*:}"
+  echo $GOOS
+  echo $GOARCH
+  CGO_ENABLED=0 GOOS=$GOOS GOARCH=$GOARCH go build -buildvcs=false -o sparkctl-${GOOS}-${GOARCH}
done
diff --git a/sparkctl/cmd/client.go b/sparkctl/cmd/client.go
index b28004504..e22d26afa 100644
--- a/sparkctl/cmd/client.go
+++ b/sparkctl/cmd/client.go
@@ -25,7 +25,7 @@ import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"

-	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
)
diff --git a/sparkctl/cmd/create.go b/sparkctl/cmd/create.go
index 1809b3d8f..49ac4bee0 100644
--- a/sparkctl/cmd/create.go
+++ b/sparkctl/cmd/create.go
@@ -19,7 +19,6 @@ package cmd
import (
	"context"
	"fmt"
-	"io/ioutil"
	"net/url"
	"os"
	"path/filepath"
@@ -36,7 +35,7 @@ import (
	"k8s.io/apimachinery/pkg/util/yaml"
	clientset "k8s.io/client-go/kubernetes"

-	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
)
@@ -293,12 +292,12 @@ func filterLocalFiles(files []string) ([]string, error) {
}

func isLocalFile(file string) (bool, error) {
-	fileUrl, err := url.Parse(file)
+	fileURL, err := url.Parse(file)
	if err != nil {
		return false, err
	}

-	if fileUrl.Scheme == "file" || fileUrl.Scheme == "" {
+	if fileURL.Scheme == "file" || fileURL.Scheme == "" {
		return true, nil
	}
@@ -332,7 +331,7 @@ func (uh uploadHandler) uploadToBucket(uploadPath, localFilePath string) (string
	fmt.Printf("uploading local file: %s\n", fileName)

	// Prepare the file for upload.
- data, err := ioutil.ReadFile(localFilePath) + data, err := os.ReadFile(localFilePath) if err != nil { return "", fmt.Errorf("failed to read file: %s", err) } @@ -387,21 +386,21 @@ func uploadLocalDependencies(app *v1beta2.SparkApplication, files []string) ([]s "unable to upload local dependencies: no upload location specified via --upload-to") } - uploadLocationUrl, err := url.Parse(UploadToPath) + uploadLocationURL, err := url.Parse(UploadToPath) if err != nil { return nil, err } - uploadBucket := uploadLocationUrl.Host + uploadBucket := uploadLocationURL.Host var uh *uploadHandler ctx := context.Background() - switch uploadLocationUrl.Scheme { + switch uploadLocationURL.Scheme { case "gs": uh, err = newGCSBlob(ctx, uploadBucket, UploadToEndpoint, UploadToRegion) case "s3": uh, err = newS3Blob(ctx, uploadBucket, UploadToEndpoint, UploadToRegion, S3ForcePathStyle) default: - return nil, fmt.Errorf("unsupported upload location URL scheme: %s", uploadLocationUrl.Scheme) + return nil, fmt.Errorf("unsupported upload location URL scheme: %s", uploadLocationURL.Scheme) } // Check if bucket has been successfully setup @@ -457,7 +456,7 @@ func buildHadoopConfigMap(appName string, hadoopConfDir string) (*apiv1.ConfigMa return nil, fmt.Errorf("%s is not a directory", hadoopConfDir) } - files, err := ioutil.ReadDir(hadoopConfDir) + files, err := os.ReadDir(hadoopConfDir) if err != nil { return nil, err } @@ -472,7 +471,7 @@ func buildHadoopConfigMap(appName string, hadoopConfDir string) (*apiv1.ConfigMa if file.IsDir() { continue } - content, err := ioutil.ReadFile(filepath.Join(hadoopConfDir, file.Name())) + content, err := os.ReadFile(filepath.Join(hadoopConfDir, file.Name())) if err != nil { return nil, err } diff --git a/sparkctl/cmd/create_test.go b/sparkctl/cmd/create_test.go index e319ddfb2..aa3d89615 100644 --- a/sparkctl/cmd/create_test.go +++ b/sparkctl/cmd/create_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/api/v1beta2" ) func TestIsLocalFile(t *testing.T) { @@ -84,9 +84,9 @@ func TestValidateSpec(t *testing.T) { testFn := func(test testcase, t *testing.T) { err := validateSpec(test.spec) if test.expectsValidationError { - assert.True(t, err != nil, "%s: expected error got nothing", test.name) + assert.Error(t, err, "%s: expected error got nothing", test.name) } else { - assert.True(t, err == nil, "%s: did not expect error got %v", test.name, err) + assert.NoError(t, err, "%s: did not expect error got %v", test.name, err) } } @@ -161,12 +161,12 @@ func TestLoadFromYAML(t *testing.T) { t.Fatal(err) } - assert.Equal(t, app.Name, "example") - assert.Equal(t, *app.Spec.MainClass, "org.examples.SparkExample") - assert.Equal(t, *app.Spec.MainApplicationFile, "local:///path/to/example.jar") - assert.Equal(t, *app.Spec.Driver.Image, "spark") - assert.Equal(t, *app.Spec.Executor.Image, "spark") - assert.Equal(t, int(*app.Spec.Executor.Instances), 1) + assert.Equal(t, "example", app.Name) + assert.Equal(t, "org.examples.SparkExample", *app.Spec.MainClass) + assert.Equal(t, "local:///path/to/example.jar", *app.Spec.MainApplicationFile) + assert.Equal(t, "spark", *app.Spec.Driver.Image) + assert.Equal(t, "spark", *app.Spec.Executor.Image) + assert.Equal(t, 1, int(*app.Spec.Executor.Instances)) } func TestHandleHadoopConfiguration(t *testing.T) { @@ -175,8 +175,8 @@ func TestHandleHadoopConfiguration(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 
configMap.Name, "test-hadoop-config") - assert.Equal(t, len(configMap.BinaryData), 1) - assert.Equal(t, len(configMap.Data), 1) + assert.Equal(t, "test-hadoop-config", configMap.Name) + assert.Len(t, configMap.BinaryData, 1) + assert.Len(t, configMap.Data, 1) assert.True(t, strings.Contains(configMap.Data["core-site.xml"], "fs.gs.impl")) } diff --git a/sparkctl/cmd/delete.go b/sparkctl/cmd/delete.go index d6366c747..f75dc65df 100644 --- a/sparkctl/cmd/delete.go +++ b/sparkctl/cmd/delete.go @@ -22,7 +22,6 @@ import ( "os" "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" @@ -32,7 +31,7 @@ var deleteCmd = &cobra.Command{ Use: "delete ", Short: "Delete a SparkApplication object", Long: `Delete a SparkApplication object with a given name`, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, args []string) { if len(args) != 1 { fmt.Fprintln(os.Stderr, "must specify a SparkApplication name") return diff --git a/sparkctl/cmd/event.go b/sparkctl/cmd/event.go index 5553c9c27..38559b505 100644 --- a/sparkctl/cmd/event.go +++ b/sparkctl/cmd/event.go @@ -25,8 +25,7 @@ import ( "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" @@ -147,7 +146,7 @@ func streamEvents(events watch.Interface, streamSince int64) error { table.Render() // Set 10 minutes inactivity timeout - watchExpire := time.Duration(10 * time.Minute) + watchExpire := 10 * time.Minute intr := interrupt.New(nil, events.Stop) return intr.Run(func() error { // Start rendering contents of the table without table header as it is already printed diff --git a/sparkctl/cmd/forward.go b/sparkctl/cmd/forward.go index dbaeb9c67..6af80de3e 100644 --- a/sparkctl/cmd/forward.go +++ b/sparkctl/cmd/forward.go @@ -69,7 +69,7 @@ var forwardCmd = &cobra.Command{ } restClient := kubeClientset.CoreV1().RESTClient() - driverPodUrl, driverPodName, err := getDriverPodUrlAndName(args[0], restClient, crdClientset) + driverPodURL, driverPodName, err := getDriverPodURLAndName(args[0], restClient, crdClientset) if err != nil { fmt.Fprintf(os.Stderr, "failed to get an API server URL of the driver pod of SparkApplication %s: %v\n", @@ -80,7 +80,7 @@ var forwardCmd = &cobra.Command{ stopCh := make(chan struct{}, 1) readyCh := make(chan struct{}) - forwarder, err := newPortForwarder(config, driverPodUrl, stopCh, readyCh) + forwarder, err := newPortForwarder(config, driverPodURL, stopCh, readyCh) if err != nil { fmt.Fprintf(os.Stderr, "failed to get a port forwarder: %v\n", err) return @@ -120,7 +120,7 @@ func newPortForwarder( return fw, nil } -func getDriverPodUrlAndName( +func getDriverPodURLAndName( name string, restClient rest.Interface, crdClientset crdclientset.Interface) (*url.URL, string, error) { diff --git a/sparkctl/cmd/gcs.go b/sparkctl/cmd/gcs.go index 3fa2c35ff..fc807f892 100644 --- a/sparkctl/cmd/gcs.go +++ b/sparkctl/cmd/gcs.go @@ -26,7 +26,7 @@ import ( ) type blobGCS struct { - projectId string + projectID string endpoint string region string } @@ -41,7 +41,7 @@ func (blob blobGCS) setPublicACL( } defer client.Close() - handle := client.Bucket(bucket).UserProject(blob.projectId) + handle := client.Bucket(bucket).UserProject(blob.projectID) if err = handle.Object(filePath).ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil { return 
fmt.Errorf("failed to set ACL on GCS object %s: %v", filePath, err) } @@ -58,7 +58,7 @@ func newGCSBlob( return nil, err } - projectId, err := gcp.DefaultProjectID(creds) + projectID, err := gcp.DefaultProjectID(creds) if err != nil { return nil, err } @@ -70,7 +70,7 @@ func newGCSBlob( b, err := gcsblob.OpenBucket(ctx, c, bucket, nil) return &uploadHandler{ - blob: blobGCS{endpoint: endpoint, region: region, projectId: string(projectId)}, + blob: blobGCS{endpoint: endpoint, region: region, projectID: string(projectID)}, ctx: ctx, b: b, blobUploadBucket: bucket, diff --git a/sparkctl/cmd/list.go b/sparkctl/cmd/list.go index 0ecbe16bb..5777d14db 100644 --- a/sparkctl/cmd/list.go +++ b/sparkctl/cmd/list.go @@ -23,7 +23,6 @@ import ( "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" @@ -33,7 +32,7 @@ var listCmd = &cobra.Command{ Use: "list", Short: "List SparkApplication objects", Long: `List SparkApplication objects in a given namespaces.`, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, args []string) { crdClientset, err := getSparkApplicationClient() if err != nil { fmt.Fprintf(os.Stderr, "failed to get SparkApplication client: %v\n", err) @@ -56,7 +55,7 @@ func doList(crdClientset crdclientset.Interface) error { table.SetHeader([]string{"Name", "State", "Submission Age", "Termination Age"}) for _, app := range apps.Items { table.Append([]string{ - string(app.Name), + app.Name, string(app.Status.AppState.State), getSinceTime(app.Status.LastSubmissionAttemptTime), getSinceTime(app.Status.TerminationTime), diff --git a/sparkctl/cmd/log.go b/sparkctl/cmd/log.go index 764c21484..f86b25ecd 100644 --- a/sparkctl/cmd/log.go +++ b/sparkctl/cmd/log.go @@ -24,15 +24,14 @@ import ( "time" "github.com/spf13/cobra" - - apiv1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" ) -var ExecutorId int32 +var ExecutorID int32 var FollowLogs bool var logCommand = &cobra.Command{ @@ -64,7 +63,7 @@ var logCommand = &cobra.Command{ } func init() { - logCommand.Flags().Int32VarP(&ExecutorId, "executor", "e", -1, + logCommand.Flags().Int32VarP(&ExecutorID, "executor", "e", -1, "id of the executor to fetch logs for") logCommand.Flags().BoolVarP(&FollowLogs, "follow", "f", false, "whether to stream the logs") } @@ -96,9 +95,8 @@ func doLog( if followLogs { return streamLogs(os.Stdout, kubeClient, podName) - } else { - return printLogs(os.Stdout, kubeClient, podName) } + return printLogs(os.Stdout, kubeClient, podName) } func getPodNameChannel( @@ -107,7 +105,7 @@ func getPodNameChannel( channel := make(chan string, 1) go func() { - for true { + for { app, _ := crdClient.SparkoperatorV1beta2().SparkApplications(Namespace).Get( context.TODO(), sparkApplicationName, @@ -125,12 +123,12 @@ func getPodNameChannel( func waitForLogsFromPodChannel( podName string, kubeClient clientset.Interface, - crdClient crdclientset.Interface) chan bool { + _ crdclientset.Interface) chan bool { channel := make(chan bool, 1) go func() { - for true { - _, err := kubeClient.CoreV1().Pods(Namespace).GetLogs(podName, &apiv1.PodLogOptions{}).Do(context.TODO()).Raw() + for { + _, err := kubeClient.CoreV1().Pods(Namespace).GetLogs(podName, &corev1.PodLogOptions{}).Do(context.TODO()).Raw() if err == nil { channel 
<- true @@ -143,7 +141,7 @@ func waitForLogsFromPodChannel( // printLogs is a one time operation that prints the fetched logs of the given pod. func printLogs(out io.Writer, kubeClientset clientset.Interface, podName string) error { - rawLogs, err := kubeClientset.CoreV1().Pods(Namespace).GetLogs(podName, &apiv1.PodLogOptions{}).Do(context.TODO()).Raw() + rawLogs, err := kubeClientset.CoreV1().Pods(Namespace).GetLogs(podName, &corev1.PodLogOptions{}).Do(context.TODO()).Raw() if err != nil { return err } @@ -153,7 +151,7 @@ func printLogs(out io.Writer, kubeClientset clientset.Interface, podName string) // streamLogs streams the logs of the given pod until there are no more logs available. func streamLogs(out io.Writer, kubeClientset clientset.Interface, podName string) error { - request := kubeClientset.CoreV1().Pods(Namespace).GetLogs(podName, &apiv1.PodLogOptions{Follow: true}) + request := kubeClientset.CoreV1().Pods(Namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}) reader, err := request.Stream(context.TODO()) if err != nil { return err diff --git a/sparkctl/cmd/s3.go b/sparkctl/cmd/s3.go index abc92cc4c..28e9350ae 100644 --- a/sparkctl/cmd/s3.go +++ b/sparkctl/cmd/s3.go @@ -53,7 +53,7 @@ func newS3Blob( if region == "" { region = "us-east1" } - endpointResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { + endpointResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, _ ...interface{}) (aws.Endpoint, error) { if service == s3.ServiceID && endpoint != "" { return aws.Endpoint{ PartitionID: "aws", diff --git a/sparkctl/cmd/status.go b/sparkctl/cmd/status.go index 8502e72b0..cd773454a 100644 --- a/sparkctl/cmd/status.go +++ b/sparkctl/cmd/status.go @@ -23,7 +23,7 @@ import ( "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/api/v1beta2" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" ) @@ -31,7 +31,7 @@ var statusCmd = &cobra.Command{ Use: "status ", Short: "Check status of a SparkApplication", Long: `Check status of a SparkApplication with a given name`, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, args []string) { if len(args) != 1 { fmt.Fprintln(os.Stderr, "must specify a SparkApplication name") return diff --git a/test/e2e/README.md b/test/e2e/README.md deleted file mode 100644 index d29f882aa..000000000 --- a/test/e2e/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# E2E Testing - -End-to-end (e2e) testing is automated testing for real user scenarios. - -## Build and Run Tests - -Prerequisites: -- A running k8s cluster and kube config. We will need to pass kube config as arguments. -- Have kubeconfig file ready. -- Have a Kubernetes Operator for Spark image ready. - -e2e tests are written as Go test. All go test techniques apply (e.g. picking what to run, timeout length). Let's say I want to run all tests in "test/e2e/": - -```bash -$ docker build -t gcr.io/spark-operator/spark-operator:local . -$ go test -v ./test/e2e/ --kubeconfig "$HOME/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:local -``` - -### Available Tests - -Note that all tests are run on a live Kubernetes cluster. After the tests are done, the Spark Operator deployment and associated resources (e.g. ClusterRole and ClusterRoleBinding) are deleted from the cluster. 
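One pattern in the deleted suite below is worth spelling out: each test polls the application's state until it reaches `COMPLETED`, and only then asserts on the driver logs. A condensed sketch of that loop follows (a hypothetical helper, not from the deleted code, reusing the clientset calls shown elsewhere in this patch; the interval and timeout values are illustrative):

```go
package e2e

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"

	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
)

// waitForCompleted is a condensed, hypothetical version of the polling the
// deleted tests perform: re-read the SparkApplication until its AppState
// reaches COMPLETED, or give up after the timeout.
func waitForCompleted(crd crdclientset.Interface, namespace, name string) error {
	return wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
		app, err := crd.SparkoperatorV1beta2().SparkApplications(namespace).Get(
			context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			// Keep polling on transient errors; the tests tolerate them too.
			return false, nil
		}
		return string(app.Status.AppState.State) == "COMPLETED", nil
	})
}
```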
- -* `basic_test.go` - - This test submits `spark-pi.yaml` contained in `\examples`. It then checks that the Spark job successfully completes with the correct result of Pi. - -* `volume_mount_test.go` - - This test submits `spark-pi-configmap.yaml` contained in `\examples`. It verifies that a dummy ConfigMap can be mounted in the Spark pods. - -* `lifecycle_test.go` - - This test submits `spark-pi.yaml` contained in `\examples`. It verifies that the created SparkApplication CRD object goes through the correct series of states as dictated by the controller. Once the job is finished, an update operation is performed on the CRD object to trigger a re-run. The transition from a completed job to a new running job is verified for correctness. diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go deleted file mode 100644 index f6e2edf21..000000000 --- a/test/e2e/basic_test.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "context" - "log" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/wait" - - appFramework "github.com/kubeflow/spark-operator/test/e2e/framework" -) - -func TestSubmitSparkPiYaml(t *testing.T) { - t.Parallel() - - appName := "spark-pi" - sa, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi.yaml") - assert.Equal(t, nil, err) - - if appFramework.SparkTestNamespace != "" { - sa.ObjectMeta.Namespace = appFramework.SparkTestNamespace - } - - if appFramework.SparkTestServiceAccount != "" { - sa.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount - } - - if appFramework.SparkTestImage != "" { - sa.Spec.Image = &appFramework.SparkTestImage - } - - err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, sa) - assert.Equal(t, nil, err) - - status := GetJobStatus(t, appName) - - err = wait.Poll(INTERVAL, TIMEOUT, func() (done bool, err error) { - if status == "COMPLETED" { - return true, nil - } - status = GetJobStatus(t, appName) - return false, nil - }) - assert.Equal(t, nil, err) - - app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - podName := app.Status.DriverInfo.PodName - log.Printf("LABELS: %v", app.ObjectMeta.GetLabels()) - rawLogs, err := framework.KubeClient.CoreV1().Pods(appFramework.SparkTestNamespace).GetLogs(podName, &v1.PodLogOptions{}).Do(context.TODO()).Raw() - assert.Equal(t, nil, err) - assert.NotEqual(t, -1, strings.Index(string(rawLogs), "Pi is roughly 3")) - - err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) -} - -func TestSubmitSparkPiCustomResourceYaml(t *testing.T) { - t.Parallel() - - appName := "spark-pi-custom-resource" - sa, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi-custom-resource.yaml") - assert.Equal(t, nil, err) - - 
if appFramework.SparkTestNamespace != "" { - sa.ObjectMeta.Namespace = appFramework.SparkTestNamespace - } - - if appFramework.SparkTestServiceAccount != "" { - sa.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount - } - - if appFramework.SparkTestImage != "" { - sa.Spec.Image = &appFramework.SparkTestImage - } - - err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, sa) - assert.Equal(t, nil, err) - - status := GetJobStatus(t, appName) - - err = wait.Poll(INTERVAL, TIMEOUT, func() (done bool, err error) { - if status == "COMPLETED" { - return true, nil - } - status = GetJobStatus(t, appName) - return false, nil - }) - assert.Equal(t, nil, err) - - app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - podName := app.Status.DriverInfo.PodName - rawLogs, err := framework.KubeClient.CoreV1().Pods(appFramework.SparkTestNamespace).GetLogs(podName, &v1.PodLogOptions{}).Do(context.TODO()).Raw() - assert.Equal(t, nil, err) - assert.NotEqual(t, -1, strings.Index(string(rawLogs), "Pi is roughly 3")) - - err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) -} diff --git a/test/e2e/framework/cluster_role.go b/test/e2e/framework/cluster_role.go deleted file mode 100644 index df7adf2f6..000000000 --- a/test/e2e/framework/cluster_role.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - "os" - - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func CreateClusterRole(kubeClient kubernetes.Interface, relativePath string) error { - clusterRole, err := parseClusterRoleYaml(relativePath) - if err != nil { - return err - } - - _, err = kubeClient.RbacV1().ClusterRoles().Get(context.TODO(), clusterRole.Name, metav1.GetOptions{}) - - if err == nil { - // ClusterRole already exists -> Update - _, err = kubeClient.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}) - if err != nil { - return err - } - - } else { - // ClusterRole doesn't exists -> Create - _, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}) - if err != nil { - return err - } - } - - return nil -} - -func DeleteClusterRole(kubeClient kubernetes.Interface, relativePath string) error { - clusterRole, err := parseClusterRoleYaml(relativePath) - if err != nil { - return err - } - - if err := kubeClient.RbacV1().ClusterRoles().Delete(context.TODO(), clusterRole.Name, metav1.DeleteOptions{}); err != nil { - return err - } - - return nil -} - -func parseClusterRoleYaml(relativePath string) (*rbacv1.ClusterRole, error) { - var manifest *os.File - var err error - - var clusterRole rbacv1.ClusterRole - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "ClusterRole" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &clusterRole) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &clusterRole, nil -} diff --git a/test/e2e/framework/cluster_role_binding.go b/test/e2e/framework/cluster_role_binding.go deleted file mode 100644 index e3224c3aa..000000000 --- a/test/e2e/framework/cluster_role_binding.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - "os" - - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func CreateClusterRoleBinding(kubeClient kubernetes.Interface, relativePath string) (finalizerFn, error) { - finalizerFn := func() error { - return DeleteClusterRoleBinding(kubeClient, relativePath) - } - clusterRoleBinding, err := parseClusterRoleBindingYaml(relativePath) - if err != nil { - return finalizerFn, err - } - - _, err = kubeClient.RbacV1().ClusterRoleBindings().Get(context.TODO(), clusterRoleBinding.Name, metav1.GetOptions{}) - - if err == nil { - // ClusterRoleBinding already exists -> Update - _, err = kubeClient.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding, metav1.UpdateOptions{}) - if err != nil { - return finalizerFn, err - } - } else { - // ClusterRoleBinding doesn't exists -> Create - _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}) - if err != nil { - return finalizerFn, err - } - } - - return finalizerFn, err -} - -func DeleteClusterRoleBinding(kubeClient kubernetes.Interface, relativePath string) error { - clusterRoleBinding, err := parseClusterRoleYaml(relativePath) - if err != nil { - return err - } - - if err := kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBinding.Name, metav1.DeleteOptions{}); err != nil { - return err - } - - return nil -} - -func parseClusterRoleBindingYaml(relativePath string) (*rbacv1.ClusterRoleBinding, error) { - var manifest *os.File - var err error - - var clusterRoleBinding rbacv1.ClusterRoleBinding - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "ClusterRoleBinding" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &clusterRoleBinding) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &clusterRoleBinding, nil -} diff --git a/test/e2e/framework/config_map.go b/test/e2e/framework/config_map.go deleted file mode 100644 index 01061b827..000000000 --- a/test/e2e/framework/config_map.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "fmt" - "github.com/pkg/errors" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -func CreateConfigMap(kubeClient kubernetes.Interface, name string, namespace string) (*v1.ConfigMap, error) { - configMap := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Data: map[string]string{ - "testKey": "testValue", - }, - } - - _, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - - if err == nil { - // ConfigMap already exists -> Update - configMap, err = kubeClient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}) - if err != nil { - return nil, err - } - } else { - // ConfigMap doesn't exists -> Create - configMap, err = kubeClient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - } - - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to create ConfigMap with name %v", name)) - } - return configMap, nil -} - -func DeleteConfigMap(kubeClient kubernetes.Interface, name string, namespace string) error { - return kubeClient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} diff --git a/test/e2e/framework/context.go b/test/e2e/framework/context.go deleted file mode 100644 index 4d422cb82..000000000 --- a/test/e2e/framework/context.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "strconv" - "strings" - "testing" - "time" - - "golang.org/x/sync/errgroup" -) - -type TestCtx struct { - ID string - cleanUpFns []finalizerFn -} - -type finalizerFn func() error - -func (f *Framework) NewTestCtx(t *testing.T) TestCtx { - // TestCtx is used among others for namespace names where '/' is forbidden - prefix := strings.TrimPrefix( - strings.Replace( - strings.ToLower(t.Name()), - "/", - "-", - -1, - ), - "test", - ) - - id := prefix + "-" + strconv.FormatInt(time.Now().Unix(), 36) - return TestCtx{ - ID: id, - } -} - -// GetObjID returns an ascending ID based on the length of cleanUpFns. It is -// based on the premise that every new object also appends a new finalizerFn on -// cleanUpFns. This can e.g. be used to create multiple namespaces in the same -// test context. 
-func (ctx *TestCtx) GetObjID() string { - return ctx.ID + "-" + strconv.Itoa(len(ctx.cleanUpFns)) -} - -func (ctx *TestCtx) Cleanup(t *testing.T) { - var eg errgroup.Group - - for i := len(ctx.cleanUpFns) - 1; i >= 0; i-- { - eg.Go(ctx.cleanUpFns[i]) - } - - if err := eg.Wait(); err != nil { - t.Fatal(err) - } -} - -func (ctx *TestCtx) AddFinalizerFn(fn finalizerFn) { - ctx.cleanUpFns = append(ctx.cleanUpFns, fn) -} diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go deleted file mode 100644 index e79c5c635..000000000 --- a/test/e2e/framework/deployment.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "fmt" - "time" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func MakeDeployment(pathToYaml string) (*appsv1.Deployment, error) { - manifest, err := PathToOSFile(pathToYaml) - if err != nil { - return nil, err - } - deployment := appsv1.Deployment{} - if err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&deployment); err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to decode file %s", pathToYaml)) - } - - return &deployment, nil -} - -func CreateDeployment(kubeClient kubernetes.Interface, namespace string, d *appsv1.Deployment) error { - _, err := kubeClient.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to create deployment %s", d.Name)) - } - return nil -} - -func DeleteDeployment(kubeClient kubernetes.Interface, namespace, name string) error { - d, err := kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return err - } - - zero := int32(0) - d.Spec.Replicas = &zero - - d, err = kubeClient.AppsV1().Deployments(namespace).Update(context.TODO(), d, metav1.UpdateOptions{}) - if err != nil { - return err - } - return kubeClient.AppsV1().Deployments(namespace).Delete(context.TODO(), d.Name, metav1.DeleteOptions{}) -} - -func WaitUntilDeploymentGone(kubeClient kubernetes.Interface, namespace, name string, timeout time.Duration) error { - return wait.Poll(time.Second, timeout, func() (bool, error) { - _, err := kubeClient. - AppsV1().Deployments(namespace). 
- Get(context.TODO(), name, metav1.GetOptions{}) - - if err != nil { - if apierrors.IsNotFound(err) { - return true, nil - } - - return false, err - } - - return false, nil - }) -} diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go deleted file mode 100644 index a3d7c17a7..000000000 --- a/test/e2e/framework/framework.go +++ /dev/null @@ -1,216 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "fmt" - "time" - - crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" - "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/kubernetes" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/clientcmd" - - "github.com/pkg/errors" -) - -// Framework contains all components required to run the test framework. -type Framework struct { - KubeClient kubernetes.Interface - SparkApplicationClient crdclientset.Interface - MasterHost string - Namespace *v1.Namespace - SparkTestNamespace *v1.Namespace - OperatorPod *v1.Pod - DefaultTimeout time.Duration -} - -var SparkTestNamespace = "" -var SparkTestServiceAccount = "" -var SparkTestImage = "" - -// Sets up a test framework and returns it. 
-func New(ns, sparkNs, kubeconfig, opImage string, opImagePullPolicy string) (*Framework, error) { - config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - return nil, errors.Wrap(err, "build config from flags failed") - } - - cli, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, errors.Wrap(err, "creating new kube-client failed") - } - - namespace, err := CreateNamespace(cli, ns) - if err != nil { - fmt.Println(nil, err, namespace) - } - - sparkTestNamespace, err := CreateNamespace(cli, sparkNs) - if err != nil { - fmt.Println(nil, err, sparkNs) - } - - saClient, err := crdclientset.NewForConfig(config) - if err != nil { - return nil, errors.Wrap(err, "failed to create SparkApplication client") - } - - f := &Framework{ - MasterHost: config.Host, - KubeClient: cli, - SparkApplicationClient: saClient, - Namespace: namespace, - SparkTestNamespace: sparkTestNamespace, - DefaultTimeout: time.Minute, - } - - err = f.Setup(sparkNs, opImage, opImagePullPolicy) - if err != nil { - return nil, errors.Wrap(err, "setup test environment failed") - } - - return f, nil -} - -func (f *Framework) Setup(sparkNs, opImage string, opImagePullPolicy string) error { - if err := f.setupOperator(sparkNs, opImage, opImagePullPolicy); err != nil { - return errors.Wrap(err, "setup operator failed") - } - - return nil -} - -func (f *Framework) setupOperator(sparkNs, opImage string, opImagePullPolicy string) error { - if _, err := CreateServiceAccount(f.KubeClient, f.Namespace.Name, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create operator service account") - } - - if err := CreateClusterRole(f.KubeClient, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create cluster role") - } - - if _, err := CreateClusterRoleBinding(f.KubeClient, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create cluster role binding") - } - - if _, err := CreateServiceAccount(f.KubeClient, f.SparkTestNamespace.Name, "../../manifest/spark-application-rbac/spark-application-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create Spark service account") - } - - if err := CreateRole(f.KubeClient, f.SparkTestNamespace.Name, "../../manifest/spark-application-rbac/spark-application-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create role") - } - - if _, err := CreateRoleBinding(f.KubeClient, f.SparkTestNamespace.Name, "../../manifest/spark-application-rbac/spark-application-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create role binding") - } - - job, err := MakeJob("../../manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml") - if err != nil { - return err - } - - if opImage != "" { - // Override operator image used, if specified when running tests. 
- job.Spec.Template.Spec.Containers[0].Image = opImage - } - - for _, container := range job.Spec.Template.Spec.Containers { - container.ImagePullPolicy = v1.PullPolicy(opImagePullPolicy) - } - - err = CreateJob(f.KubeClient, f.Namespace.Name, job) - if err != nil { - return errors.Wrap(err, "failed to create job that creates the webhook secret") - } - - err = WaitUntilJobCompleted(f.KubeClient, f.Namespace.Name, job.Name, time.Minute) - if err != nil { - return errors.Wrap(err, "The gencert job failed or timed out") - } - - if err := DeleteJob(f.KubeClient, f.Namespace.Name, job.Name); err != nil { - return errors.Wrap(err, "failed to delete the init job") - } - - if _, err := CreateService(f.KubeClient, f.Namespace.Name, "../../manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml"); err != nil { - return errors.Wrap(err, "failed to create webhook service") - } - - deploy, err := MakeDeployment("../../manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml") - if err != nil { - return err - } - - if opImage != "" { - // Override operator image used, if specified when running tests. - deploy.Spec.Template.Spec.Containers[0].Image = opImage - } - - for _, container := range deploy.Spec.Template.Spec.Containers { - container.ImagePullPolicy = v1.PullPolicy(opImagePullPolicy) - } - - err = CreateDeployment(f.KubeClient, f.Namespace.Name, deploy) - if err != nil { - return err - } - - opts := metav1.ListOptions{LabelSelector: fields.SelectorFromSet(fields.Set(deploy.Spec.Template.ObjectMeta.Labels)).String()} - err = WaitForPodsReady(f.KubeClient, f.Namespace.Name, f.DefaultTimeout, 1, opts) - if err != nil { - return errors.Wrap(err, "failed to wait for operator to become ready") - } - - pl, err := f.KubeClient.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), opts) - if err != nil { - return err - } - f.OperatorPod = &pl.Items[0] - return nil -} - -// Teardown tears down a previously initialized test environment. -func (f *Framework) Teardown() error { - if err := DeleteClusterRole(f.KubeClient, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to delete operator cluster role") - } - - if err := DeleteClusterRoleBinding(f.KubeClient, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to delete operator cluster role binding") - } - - if err := f.KubeClient.AppsV1().Deployments(f.Namespace.Name).Delete(context.TODO(), "sparkoperator", metav1.DeleteOptions{}); err != nil { - return err - } - - if err := DeleteNamespace(f.KubeClient, f.Namespace.Name); err != nil { - return err - } - - if err := DeleteNamespace(f.KubeClient, f.SparkTestNamespace.Name); err != nil { - return err - } - - return nil -} diff --git a/test/e2e/framework/helpers.go b/test/e2e/framework/helpers.go deleted file mode 100644 index 16e60a766..000000000 --- a/test/e2e/framework/helpers.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "fmt" - "net/http" - "os" - "path/filepath" - "time" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - "github.com/pkg/errors" -) - -// PathToOSFile gets the absolute path from relative path. -func PathToOSFile(relativePath string) (*os.File, error) { - path, err := filepath.Abs(relativePath) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed generate absolute file path of %s", relativePath)) - } - - manifest, err := os.Open(path) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to open file %s", path)) - } - - return manifest, nil -} - -// WaitForPodsReady waits for a selection of Pods to be running and each -// container to pass its readiness check. -func WaitForPodsReady(kubeClient kubernetes.Interface, namespace string, timeout time.Duration, expectedReplicas int, opts metav1.ListOptions) error { - return wait.Poll(time.Second, timeout, func() (bool, error) { - pl, err := kubeClient.CoreV1().Pods(namespace).List(context.TODO(), opts) - if err != nil { - return false, err - } - - runningAndReady := 0 - for _, p := range pl.Items { - isRunningAndReady, err := PodRunningAndReady(p) - if err != nil { - return false, err - } - - if isRunningAndReady { - runningAndReady++ - } - } - - if runningAndReady == expectedReplicas { - return true, nil - } - return false, nil - }) -} - -func WaitForPodsRunImage(kubeClient kubernetes.Interface, namespace string, expectedReplicas int, image string, opts metav1.ListOptions) error { - return wait.Poll(time.Second, time.Minute*5, func() (bool, error) { - pl, err := kubeClient.CoreV1().Pods(namespace).List(context.TODO(), opts) - if err != nil { - return false, err - } - - runningImage := 0 - for _, p := range pl.Items { - if podRunsImage(p, image) { - runningImage++ - } - } - - if runningImage == expectedReplicas { - return true, nil - } - return false, nil - }) -} - -func WaitForHTTPSuccessStatusCode(timeout time.Duration, url string) error { - var resp *http.Response - err := wait.Poll(time.Second, timeout, func() (bool, error) { - var err error - resp, err = http.Get(url) - if err == nil && resp.StatusCode == 200 { - return true, nil - } - return false, nil - }) - - return errors.Wrap(err, fmt.Sprintf( - "waiting for %v to return a successful status code timed out. Last response from server was: %v", - url, - resp, - )) -} - -func podRunsImage(p v1.Pod, image string) bool { - for _, c := range p.Spec.Containers { - if image == c.Image { - return true - } - } - - return false -} - -func GetLogs(kubeClient kubernetes.Interface, namespace string, podName, containerName string) (string, error) { - logs, err := kubeClient.CoreV1().RESTClient().Get(). - Resource("pods"). - Namespace(namespace). - Name(podName).SubResource("log"). - Param("container", containerName). - Do(context.TODO()). 
- Raw() - if err != nil { - return "", err - } - return string(logs), err -} - -func ProxyGetPod(kubeClient kubernetes.Interface, namespace string, podName string, port string, path string) *rest.Request { - return kubeClient.CoreV1().RESTClient().Get().Prefix("proxy").Namespace(namespace).Resource("pods").Name(podName + ":" + port).Suffix(path) -} diff --git a/test/e2e/framework/job.go b/test/e2e/framework/job.go deleted file mode 100644 index 52d2d4ec2..000000000 --- a/test/e2e/framework/job.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "encoding/json" - "fmt" - "github.com/pkg/errors" - "io" - "k8s.io/apimachinery/pkg/util/wait" - "os" - "time" - - batchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func MakeJob(pathToYaml string) (*batchv1.Job, error) { - job, err := parseJobYaml(pathToYaml) - if err != nil { - return nil, err - } - - return job, nil -} - -func CreateJob(kubeClient kubernetes.Interface, namespace string, job *batchv1.Job) error { - _, err := kubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to create job %s", job.Name)) - } - return nil -} - -func DeleteJob(kubeClient kubernetes.Interface, namespace, name string) error { - deleteProp := metav1.DeletePropagationForeground - return kubeClient.BatchV1().Jobs(namespace).Delete( - context.TODO(), - name, - metav1.DeleteOptions{PropagationPolicy: &deleteProp}, - ) -} - -func parseJobYaml(relativePath string) (*batchv1.Job, error) { - var manifest *os.File - var err error - - var job batchv1.Job - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "Job" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &job) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &job, nil -} - -func WaitUntilJobCompleted(kubeClient kubernetes.Interface, namespace, name string, timeout time.Duration) error { - return wait.Poll(time.Second, timeout, func() (bool, error) { - job, _ := kubeClient. - BatchV1().Jobs(namespace). 
- Get(context.TODO(), name, metav1.GetOptions{}) - - if job.Status.Succeeded == 1 { - return true, nil - } else { - return false, nil - } - }) -} diff --git a/test/e2e/framework/namespace.go b/test/e2e/framework/namespace.go deleted file mode 100644 index cdd15476e..000000000 --- a/test/e2e/framework/namespace.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "fmt" - "testing" - - "github.com/pkg/errors" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -func CreateNamespace(kubeClient kubernetes.Interface, name string) (*v1.Namespace, error) { - namespace, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - }, - metav1.CreateOptions{}, - ) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to create namespace with name %v", name)) - } - return namespace, nil -} - -func (ctx *TestCtx) CreateNamespace(t *testing.T, kubeClient kubernetes.Interface) string { - name := ctx.GetObjID() - if _, err := CreateNamespace(kubeClient, name); err != nil { - t.Fatal(err) - } - - namespaceFinalizerFn := func() error { - if err := DeleteNamespace(kubeClient, name); err != nil { - return err - } - return nil - } - - ctx.AddFinalizerFn(namespaceFinalizerFn) - - return name -} - -func DeleteNamespace(kubeClient kubernetes.Interface, name string) error { - return kubeClient.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}) -} diff --git a/test/e2e/framework/operator.go b/test/e2e/framework/operator.go deleted file mode 100644 index 0b1d6467d..000000000 --- a/test/e2e/framework/operator.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "fmt" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// CustomResourceDefinitionTypeMeta sets the default kind/apiversion of CRD -var CustomResourceDefinitionTypeMeta = metav1.TypeMeta{ - Kind: "CustomResourceDefinition", - APIVersion: "apiextensions.k8s.io/v1beta1", -} - -// PodRunningAndReady returns whether a pod is running and each container has -// passed it's ready state. 
-func PodRunningAndReady(pod v1.Pod) (bool, error) { - switch pod.Status.Phase { - case v1.PodFailed, v1.PodSucceeded: - return false, fmt.Errorf("pod completed") - case v1.PodRunning: - for _, cond := range pod.Status.Conditions { - if cond.Type != v1.PodReady { - continue - } - return cond.Status == v1.ConditionTrue, nil - } - return false, fmt.Errorf("pod ready condition not found") - } - return false, nil -} diff --git a/test/e2e/framework/role.go b/test/e2e/framework/role.go deleted file mode 100644 index db4064f3d..000000000 --- a/test/e2e/framework/role.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" - "os" -) - -func CreateRole(kubeClient kubernetes.Interface, ns string, relativePath string) error { - role, err := parseRoleYaml(relativePath) - if err != nil { - return err - } - - _, err = kubeClient.RbacV1().Roles(ns).Get(context.TODO(), role.Name, metav1.GetOptions{}) - - if err == nil { - // Role already exists -> Update - _, err = kubeClient.RbacV1().Roles(ns).Update(context.TODO(), role, metav1.UpdateOptions{}) - if err != nil { - return err - } - - } else { - // Role doesn't exists -> Create - _, err = kubeClient.RbacV1().Roles(ns).Create(context.TODO(), role, metav1.CreateOptions{}) - if err != nil { - return err - } - } - - return nil -} - -func DeleteRole(kubeClient kubernetes.Interface, ns string, relativePath string) error { - role, err := parseRoleYaml(relativePath) - if err != nil { - return err - } - - if err := kubeClient.RbacV1().Roles(ns).Delete(context.TODO(), role.Name, metav1.DeleteOptions{}); err != nil { - return err - } - - return nil -} - -func parseRoleYaml(relativePath string) (*rbacv1.Role, error) { - var manifest *os.File - var err error - - var role rbacv1.Role - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "Role" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &role) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &role, nil -} diff --git a/test/e2e/framework/role_binding.go b/test/e2e/framework/role_binding.go deleted file mode 100644 index 955ad9619..000000000 --- a/test/e2e/framework/role_binding.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" - "os" -) - -func CreateRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) (finalizerFn, error) { - finalizerFn := func() error { - return DeleteRoleBinding(kubeClient, ns, relativePath) - } - roleBinding, err := parseRoleBindingYaml(relativePath) - if err != nil { - return finalizerFn, err - } - - roleBinding.Namespace = ns - - _, err = kubeClient.RbacV1().RoleBindings(ns).Get(context.TODO(), roleBinding.Name, metav1.GetOptions{}) - - if err == nil { - // RoleBinding already exists -> Update - _, err = kubeClient.RbacV1().RoleBindings(ns).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}) - if err != nil { - return finalizerFn, err - } - } else { - // RoleBinding doesn't exists -> Create - _, err = kubeClient.RbacV1().RoleBindings(ns).Create(context.TODO(), roleBinding, metav1.CreateOptions{}) - if err != nil { - return finalizerFn, err - } - } - - return finalizerFn, err -} - -func DeleteRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) error { - roleBinding, err := parseRoleBindingYaml(relativePath) - if err != nil { - return err - } - - if err := kubeClient.RbacV1().RoleBindings(ns).Delete( - context.TODO(), - roleBinding.Name, - metav1.DeleteOptions{}, - ); err != nil { - return err - } - - return nil -} - -func parseRoleBindingYaml(relativePath string) (*rbacv1.RoleBinding, error) { - var manifest *os.File - var err error - - var roleBinding rbacv1.RoleBinding - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "RoleBinding" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &roleBinding) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &roleBinding, nil -} diff --git a/test/e2e/framework/service.go b/test/e2e/framework/service.go deleted file mode 100644 index 09810bbc1..000000000 --- a/test/e2e/framework/service.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "encoding/json" - "fmt" - "io" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "os" - "time" - - "github.com/pkg/errors" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" -) - -func CreateService(kubeClient kubernetes.Interface, ns string, relativePath string) (finalizerFn, error) { - finalizerFn := func() error { - return DeleteService(kubeClient, ns, relativePath) - } - service, err := parseServiceYaml(relativePath) - if err != nil { - return finalizerFn, err - } - - service.Namespace = ns - - _, err = kubeClient.CoreV1().Services(ns).Get(context.TODO(), service.Name, metav1.GetOptions{}) - - if err == nil { - // Service already exists -> Update - _, err = kubeClient.CoreV1().Services(ns).Update(context.TODO(), service, metav1.UpdateOptions{}) - if err != nil { - return finalizerFn, err - } - } else { - // Service doesn't exists -> Create - _, err = kubeClient.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{}) - if err != nil { - return finalizerFn, err - } - } - - return finalizerFn, err -} - -func WaitForServiceReady(kubeClient kubernetes.Interface, namespace string, serviceName string) error { - err := wait.Poll(time.Second, time.Minute*5, func() (bool, error) { - endpoints, err := getEndpoints(kubeClient, namespace, serviceName) - if err != nil { - return false, err - } - if len(endpoints.Subsets) != 0 && len(endpoints.Subsets[0].Addresses) > 0 { - return true, nil - } - return false, nil - }) - return err -} - -func getEndpoints(kubeClient kubernetes.Interface, namespace, serviceName string) (*v1.Endpoints, error) { - endpoints, err := kubeClient.CoreV1().Endpoints(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("requesting endpoints for service %v failed", serviceName)) - } - return endpoints, nil -} - -func parseServiceYaml(relativePath string) (*v1.Service, error) { - var manifest *os.File - var err error - - var service v1.Service - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "Service" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &service) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &service, nil -} - -func DeleteService(kubeClient kubernetes.Interface, name string, namespace string) error { - return kubeClient.CoreV1().Services(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} diff --git a/test/e2e/framework/service_account.go b/test/e2e/framework/service_account.go deleted file mode 100644 index ceae8187d..000000000 --- a/test/e2e/framework/service_account.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
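
WaitForServiceReady above polls the legacy Endpoints API. Had this helper been kept, the equivalent check against EndpointSlices (which supersede Endpoints) would look roughly like this sketch, with a hypothetical helper name:

package framework

import (
	"context"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// serviceHasEndpoints reports whether any EndpointSlice for the Service
// carries at least one endpoint, the EndpointSlice analogue of checking
// Subsets[0].Addresses above.
func serviceHasEndpoints(ctx context.Context, kubeClient kubernetes.Interface, namespace, serviceName string) (bool, error) {
	slices, err := kubeClient.DiscoveryV1().EndpointSlices(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: discoveryv1.LabelServiceName + "=" + serviceName,
	})
	if err != nil {
		return false, err
	}
	for _, s := range slices.Items {
		if len(s.Endpoints) > 0 {
			return true, nil
		}
	}
	return false, nil
}
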
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - "os" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func CreateServiceAccount(kubeClient kubernetes.Interface, namespace string, relativePath string) (finalizerFn, error) { - finalizerFn := func() error { - return DeleteServiceAccount(kubeClient, namespace, relativePath) - } - - serviceAccount, err := parseServiceAccountYaml(relativePath) - if err != nil { - return finalizerFn, err - } - serviceAccount.Namespace = namespace - _, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}) - if err != nil { - return finalizerFn, err - } - - return finalizerFn, nil -} - -func parseServiceAccountYaml(relativePath string) (*v1.ServiceAccount, error) { - var manifest *os.File - var err error - - var serviceAccount v1.ServiceAccount - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "ServiceAccount" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &serviceAccount) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &serviceAccount, nil -} - -func DeleteServiceAccount(kubeClient kubernetes.Interface, namespace string, relativePath string) error { - serviceAccount, err := parseServiceAccountYaml(relativePath) - if err != nil { - return err - } - - return kubeClient.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), serviceAccount.Name, metav1.DeleteOptions{}) -} diff --git a/test/e2e/framework/sparkapplication.go b/test/e2e/framework/sparkapplication.go deleted file mode 100644 index b9adab0eb..000000000 --- a/test/e2e/framework/sparkapplication.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
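
Each of the deleted framework files repeats the same YAMLOrJSONDecoder loop (role, role binding, service, and the service account parser above), differing only in the decoded type, and each discards the json.Unmarshal error. A type-parameterized sketch of the shared logic; note it decodes only the first document rather than scanning a multi-document file for a matching Kind, which is a simplification:

package framework

import (
	"os"

	"k8s.io/apimachinery/pkg/util/yaml"
)

// decodeManifest collapses the four near-identical parse*Yaml helpers into
// one generic function that decodes the first YAML/JSON document in the file.
func decodeManifest[T any](path string) (*T, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	obj := new(T)
	if err := yaml.NewYAMLOrJSONDecoder(f, 100).Decode(obj); err != nil {
		return nil, err
	}
	return obj, nil
}

With this helper, parseRoleYaml(path) becomes decodeManifest[rbacv1.Role](path), and likewise for the other three parsers.
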
-*/ - -package framework - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/yaml" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" -) - -func MakeSparkApplicationFromYaml(pathToYaml string) (*v1beta2.SparkApplication, error) { - manifest, err := PathToOSFile(pathToYaml) - if err != nil { - return nil, err - } - tectonicPromOp := v1beta2.SparkApplication{} - if err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&tectonicPromOp); err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to decode file %s", pathToYaml)) - } - - return &tectonicPromOp, nil -} - -func CreateSparkApplication(crdclientset crdclientset.Interface, namespace string, sa *v1beta2.SparkApplication) error { - _, err := crdclientset.SparkoperatorV1beta2().SparkApplications(namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to create SparkApplication %s", sa.Name)) - } - return nil -} - -func UpdateSparkApplication(crdclientset crdclientset.Interface, namespace string, sa *v1beta2.SparkApplication) error { - _, err := crdclientset.SparkoperatorV1beta2().SparkApplications(namespace).Update(context.TODO(), sa, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to update SparkApplication %s", sa.Name)) - } - return nil -} - -func GetSparkApplication(crdclientset crdclientset.Interface, namespace, name string) (*v1beta2.SparkApplication, error) { - sa, err := crdclientset.SparkoperatorV1beta2().SparkApplications(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return sa, nil -} - -func DeleteSparkApplication(crdclientset crdclientset.Interface, namespace, name string) error { - err := crdclientset.SparkoperatorV1beta2().SparkApplications(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - return err - } - return nil -} diff --git a/test/e2e/lifecycle_test.go b/test/e2e/lifecycle_test.go deleted file mode 100644 index 95b93a76b..000000000 --- a/test/e2e/lifecycle_test.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
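
The clientset-based CRUD wrappers above disappear because the new suite later in this patch talks to the API server through a generic controller-runtime client instead of the generated clientset. A minimal sketch of the equivalent calls, assuming v1beta2 is registered in the client's scheme as the new suite does:

package e2e_test

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/kubeflow/spark-operator/api/v1beta2"
)

// getAndDeleteSparkApplication mirrors the deleted Get/Delete helpers using
// the scheme-driven controller-runtime client.
func getAndDeleteSparkApplication(ctx context.Context, c client.Client, namespace, name string) error {
	app := &v1beta2.SparkApplication{}
	if err := c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, app); err != nil {
		return err
	}
	return c.Delete(ctx, app)
}
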
-*/ - -package e2e - -import ( - "container/list" - "context" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/wait" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - appFramework "github.com/kubeflow/spark-operator/test/e2e/framework" -) - -func TestLifeCycleManagement(t *testing.T) { - appName := "spark-pi" - app, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi.yaml") - assert.Equal(t, nil, err) - - if appFramework.SparkTestNamespace != "" { - app.ObjectMeta.Namespace = appFramework.SparkTestNamespace - } - - if appFramework.SparkTestServiceAccount != "" { - app.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount - } - - if appFramework.SparkTestImage != "" { - app.Spec.Image = &appFramework.SparkTestImage - } - - err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, app) - assert.Equal(t, nil, err) - - states := list.New() - status := GetJobStatus(t, appName) - states.PushBack(status) - - app = runApp(t, appName, states) - - newNumExecutors := int32(2) - app.Spec.Executor.Instances = &newNumExecutors - err = appFramework.UpdateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, app) - assert.Equal(t, nil, err) - - status = GetJobStatus(t, appName) - if status != states.Back().Value { - states.PushBack(status) - } - - runApp(t, appName, states) - - assert.Equal(t, len(STATES), states.Len()) - index := 0 - for e := states.Front(); e != nil; e = e.Next() { - assert.Equal(t, STATES[index], string((e.Value).(v1beta2.ApplicationStateType))) - index += 1 - } - - err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) -} - -func runApp(t *testing.T, appName string, states *list.List) *v1beta2.SparkApplication { - err := wait.Poll(INTERVAL, TIMEOUT, func() (done bool, err error) { - status := GetJobStatus(t, appName) - if status != states.Back().Value { - states.PushBack(status) - } - if status == "COMPLETED" { - return true, nil - } - return false, nil - }) - assert.Equal(t, nil, err) - - app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - podName := app.Status.DriverInfo.PodName - rawLogs, err := framework.KubeClient.CoreV1().Pods(appFramework.SparkTestNamespace).GetLogs(podName, &v1.PodLogOptions{}).Do(context.TODO()).Raw() - assert.Equal(t, nil, err) - assert.NotEqual(t, -1, strings.Index(string(rawLogs), "Pi is roughly 3")) - - return app -} diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go deleted file mode 100644 index 07b0a19ee..000000000 --- a/test/e2e/main_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
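
The deleted lifecycle test drives its polling with wait.Poll, which is deprecated in current apimachinery in favor of context-aware polling. The new suite below uses wait.PollUntilContextCancel; a minimal sketch of the pattern:

package e2e_test

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollUntil bounds the poll with a context deadline instead of a TIMEOUT
// constant; passing true runs the condition once immediately, before the
// first interval elapses.
func pollUntil(parent context.Context, timeout time.Duration, cond wait.ConditionWithContextFunc) error {
	ctx, cancel := context.WithTimeout(parent, timeout)
	defer cancel()
	return wait.PollUntilContextCancel(ctx, time.Second, true, cond)
}
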
-*/ - -package e2e - -import ( - "flag" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/stretchr/testify/assert" - "log" - "os" - "testing" - "time" - - operatorFramework "github.com/kubeflow/spark-operator/test/e2e/framework" -) - -var framework *operatorFramework.Framework - -// Wait for test job to finish. Poll for updates once a second. Time out after 240 seconds. -var TIMEOUT = 240 * time.Second -var INTERVAL = 1 * time.Second - -var STATES = [9]string{ - "", - "SUBMITTED", - "RUNNING", - "COMPLETED", - "INVALIDATING", - "PENDING_RERUN", - "SUBMITTED", - "RUNNING", - "COMPLETED", -} - -func GetJobStatus(t *testing.T, sparkAppName string) v1beta2.ApplicationStateType { - app, err := operatorFramework.GetSparkApplication(framework.SparkApplicationClient, operatorFramework.SparkTestNamespace, sparkAppName) - assert.Equal(t, nil, err) - return app.Status.AppState.State -} - -func TestMain(m *testing.M) { - kubeconfig := flag.String("kubeconfig", "", "kube config path, e.g. $HOME/.kube/config") - opImage := flag.String("operator-image", "", "operator image, e.g. image:tag") - opImagePullPolicy := flag.String("operator-image-pullPolicy", "IfNotPresent", "pull policy, e.g. Always") - ns := flag.String("namespace", "spark-operator", "e2e test namespace") - sparkTestNamespace := flag.String("spark", "spark", "e2e test spark-test-namespace") - sparkTestImage := flag.String("spark-test-image", "", "spark test image, e.g. image:tag") - sparkTestServiceAccount := flag.String("spark-test-service-account", "spark", "e2e test spark test service account") - flag.Parse() - - if *kubeconfig == "" { - log.Printf("No kubeconfig found. Bypassing e2e tests") - os.Exit(0) - } - var err error - if framework, err = operatorFramework.New(*ns, *sparkTestNamespace, *kubeconfig, *opImage, *opImagePullPolicy); err != nil { - log.Fatalf("failed to set up framework: %v\n", err) - } - - operatorFramework.SparkTestNamespace = *sparkTestNamespace - operatorFramework.SparkTestImage = *sparkTestImage - operatorFramework.SparkTestServiceAccount = *sparkTestServiceAccount - code := m.Run() - - if err := framework.Teardown(); err != nil { - log.Fatalf("failed to tear down framework: %v\n", err) - } - - os.Exit(code) -} diff --git a/test/e2e/sparkapplication_test.go b/test/e2e/sparkapplication_test.go new file mode 100644 index 000000000..a3e8829a0 --- /dev/null +++ b/test/e2e/sparkapplication_test.go @@ -0,0 +1,267 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e_test + +import ( + "context" + "os" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/util/yaml" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/util" +) + +const ( + PollInterval = 1 * time.Second + WaitTimeout = 300 * time.Second +) + +var _ = Describe("Example SparkApplication", func() { + Context("spark-pi", func() { + ctx := context.Background() + path := filepath.Join("..", "..", "examples", "spark-pi.yaml") + app := &v1beta2.SparkApplication{} + + BeforeEach(func() { + By("Parsing SparkApplication from file") + file, err := os.Open(path) + Expect(err).NotTo(HaveOccurred()) + Expect(file).NotTo(BeNil()) + + decoder := yaml.NewYAMLOrJSONDecoder(file, 100) + Expect(decoder).NotTo(BeNil()) + Expect(decoder.Decode(app)).NotTo(HaveOccurred()) + + By("Creating SparkApplication") + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + }) + + AfterEach(func() { + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("should complete successfully", func() { + By("Waiting for SparkApplication to complete") + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + cancelCtx, cancelFunc := context.WithTimeout(ctx, WaitTimeout) + defer cancelFunc() + Expect(wait.PollUntilContextCancel(cancelCtx, PollInterval, true, func(ctx context.Context) (done bool, err error) { + err = k8sClient.Get(ctx, key, app) + if app.Status.AppState.State == v1beta2.ApplicationStateCompleted { + return true, nil + } + return false, err + })).NotTo(HaveOccurred()) + + By("Checking out driver logs") + driverPodName := util.GetDriverPodName(app) + bytes, err := clientset.CoreV1().Pods(app.Namespace).GetLogs(driverPodName, &corev1.PodLogOptions{}).Do(ctx).Raw() + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).NotTo(BeEmpty()) + Expect(strings.Contains(string(bytes), "Pi is roughly 3")).To(BeTrue()) + }) + }) + + Context("spark-pi-configmap", func() { + ctx := context.Background() + path := filepath.Join("..", "..", "examples", "spark-pi-configmap.yaml") + app := &v1beta2.SparkApplication{} + + BeforeEach(func() { + By("Parsing SparkApplication from file") + file, err := os.Open(path) + Expect(err).NotTo(HaveOccurred()) + Expect(file).NotTo(BeNil()) + + decoder := yaml.NewYAMLOrJSONDecoder(file, 100) + Expect(decoder).NotTo(BeNil()) + Expect(decoder.Decode(app)).NotTo(HaveOccurred()) + + By("Creating ConfigMap") + for _, volume := range app.Spec.Volumes { + if volume.ConfigMap != nil { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: volume.ConfigMap.Name, + Namespace: app.Namespace, + }, + } + Expect(k8sClient.Create(ctx, configMap)).To(Succeed()) + } + } + + By("Creating SparkApplication") + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + }) + + AfterEach(func() { + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + volumes := app.Spec.Volumes + By("Deleting SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + + By("Deleting ConfigMap") + for _, volume := range volumes { + if volume.ConfigMap != nil { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: volume.ConfigMap.Name, + Namespace: app.Namespace, + }, + } + 
Expect(k8sClient.Delete(ctx, configMap)).To(Succeed()) + } + } + }) + + It("Should complete successfully", func() { + By("Waiting for SparkApplication to complete") + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + cancelCtx, cancelFunc := context.WithTimeout(ctx, WaitTimeout) + defer cancelFunc() + Expect(wait.PollUntilContextCancel(cancelCtx, PollInterval, true, func(ctx context.Context) (done bool, err error) { + err = k8sClient.Get(ctx, key, app) + if app.Status.AppState.State == v1beta2.ApplicationStateCompleted { + return true, nil + } + return false, err + })).NotTo(HaveOccurred()) + + By("Checking out driver logs") + driverPodName := util.GetDriverPodName(app) + bytes, err := clientset.CoreV1().Pods(app.Namespace).GetLogs(driverPodName, &corev1.PodLogOptions{}).Do(ctx).Raw() + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).NotTo(BeEmpty()) + Expect(strings.Contains(string(bytes), "Pi is roughly 3")).To(BeTrue()) + }) + }) + + Context("spark-pi-custom-resource", func() { + ctx := context.Background() + path := filepath.Join("..", "..", "examples", "spark-pi-custom-resource.yaml") + app := &v1beta2.SparkApplication{} + + BeforeEach(func() { + By("Parsing SparkApplication from file") + file, err := os.Open(path) + Expect(err).NotTo(HaveOccurred()) + Expect(file).NotTo(BeNil()) + + decoder := yaml.NewYAMLOrJSONDecoder(file, 100) + Expect(decoder).NotTo(BeNil()) + Expect(decoder.Decode(app)).NotTo(HaveOccurred()) + + By("Creating SparkApplication") + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + }) + + AfterEach(func() { + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("Should complete successfully", func() { + By("Waiting for SparkApplication to complete") + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + cancelCtx, cancelFunc := context.WithTimeout(ctx, WaitTimeout) + defer cancelFunc() + Expect(wait.PollUntilContextCancel(cancelCtx, PollInterval, true, func(ctx context.Context) (done bool, err error) { + err = k8sClient.Get(ctx, key, app) + if app.Status.AppState.State == v1beta2.ApplicationStateCompleted { + return true, nil + } + return false, err + })).NotTo(HaveOccurred()) + + By("Checking out driver logs") + driverPodName := util.GetDriverPodName(app) + bytes, err := clientset.CoreV1().Pods(app.Namespace).GetLogs(driverPodName, &corev1.PodLogOptions{}).Do(ctx).Raw() + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).NotTo(BeEmpty()) + Expect(strings.Contains(string(bytes), "Pi is roughly 3")).To(BeTrue()) + }) + }) + + Context("spark-pi-python", func() { + ctx := context.Background() + path := filepath.Join("..", "..", "examples", "spark-pi-python.yaml") + app := &v1beta2.SparkApplication{} + + BeforeEach(func() { + By("Parsing SparkApplication from file") + file, err := os.Open(path) + Expect(err).NotTo(HaveOccurred()) + Expect(file).NotTo(BeNil()) + + decoder := yaml.NewYAMLOrJSONDecoder(file, 100) + Expect(decoder).NotTo(BeNil()) + Expect(decoder.Decode(app)).NotTo(HaveOccurred()) + + By("Creating SparkApplication") + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + }) + + AfterEach(func() { + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("Should complete successfully", 
func() {
+			By("Waiting for SparkApplication to complete")
+			key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name}
+			cancelCtx, cancelFunc := context.WithTimeout(ctx, WaitTimeout)
+			defer cancelFunc()
+			Expect(wait.PollUntilContextCancel(cancelCtx, PollInterval, true, func(ctx context.Context) (done bool, err error) {
+				err = k8sClient.Get(ctx, key, app)
+				if app.Status.AppState.State == v1beta2.ApplicationStateCompleted {
+					return true, nil
+				}
+				return false, err
+			})).NotTo(HaveOccurred())
+
+			By("Checking out driver logs")
+			driverPodName := util.GetDriverPodName(app)
+			bytes, err := clientset.CoreV1().Pods(app.Namespace).GetLogs(driverPodName, &corev1.PodLogOptions{}).Do(ctx).Raw()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(bytes).NotTo(BeEmpty())
+			Expect(strings.Contains(string(bytes), "Pi is roughly 3")).To(BeTrue())
+		})
+	})
+})
diff --git a/test/e2e/suit_test.go b/test/e2e/suit_test.go
new file mode 100644
index 000000000..4c60f9762
--- /dev/null
+++ b/test/e2e/suit_test.go
@@ -0,0 +1,159 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e_test
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"testing"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/cli"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/kubeflow/spark-operator/api/v1beta1"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/util"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+const (
+	ReleaseName      = "spark-operator"
+	ReleaseNamespace = "spark-operator"
+)
+
+var (
+	cfg       *rest.Config
+	testEnv   *envtest.Environment
+	k8sClient client.Client
+	clientset *kubernetes.Clientset
+)
+
+func TestSparkOperator(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Spark Operator Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+	var err error
+
+	By("Bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+
+		// The BinaryAssetsDirectory is only required if you want to run the tests directly
+		// without calling the makefile target test. If it is not set, the tests will look for
+		// the default path defined in controller-runtime, which is /usr/local/kubebuilder/.
+		// Note that you must have the required binaries set up under the bin directory to run
+		// the tests directly. When we run make test, they are set up and used automatically.
+		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
+			fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)),
+		UseExistingCluster: util.BoolPtr(true),
+	}
+
+	cfg, err = testEnv.Start()
+	Expect(err).NotTo(HaveOccurred())
+	Expect(cfg).NotTo(BeNil())
+
+	err = v1beta2.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = v1beta1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	// +kubebuilder:scaffold:scheme
+
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(k8sClient).NotTo(BeNil())
+
+	clientset, err = kubernetes.NewForConfig(cfg)
+	Expect(err).NotTo(HaveOccurred())
+	Expect(clientset).NotTo(BeNil())
+
+	By("Creating release namespace")
+	namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ReleaseNamespace}}
+	Expect(k8sClient.Create(context.TODO(), namespace)).NotTo(HaveOccurred())
+
+	By("Installing the Spark operator helm chart")
+	envSettings := cli.New()
+	envSettings.SetNamespace(ReleaseNamespace)
+	actionConfig := &action.Configuration{}
+	Expect(actionConfig.Init(envSettings.RESTClientGetter(), envSettings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
+		logf.Log.Info(fmt.Sprintf(format, v...))
+	})).NotTo(HaveOccurred())
+	installAction := action.NewInstall(actionConfig)
+	Expect(installAction).NotTo(BeNil())
+	installAction.ReleaseName = ReleaseName
+	installAction.Namespace = envSettings.Namespace()
+	installAction.Wait = true
+	installAction.Timeout = 5 * time.Minute
+	chartPath := filepath.Join("..", "..", "charts", "spark-operator-chart")
+	chart, err := loader.Load(chartPath)
+	Expect(err).NotTo(HaveOccurred())
+	Expect(chart).NotTo(BeNil())
+	release, err := installAction.Run(chart, nil)
+	Expect(err).NotTo(HaveOccurred())
+	Expect(release).NotTo(BeNil())
+})
+
+var _ = AfterSuite(func() {
+	By("Uninstalling the Spark operator helm chart")
+	envSettings := cli.New()
+	envSettings.SetNamespace(ReleaseNamespace)
+	actionConfig := &action.Configuration{}
+	Expect(actionConfig.Init(envSettings.RESTClientGetter(), envSettings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
+		logf.Log.Info(fmt.Sprintf(format, v...))
+	})).NotTo(HaveOccurred())
+	uninstallAction := action.NewUninstall(actionConfig)
+	Expect(uninstallAction).NotTo(BeNil())
+	uninstallAction.Wait = true
+	uninstallAction.Timeout = 5 * time.Minute
+	resp, err := uninstallAction.Run(ReleaseName)
+	Expect(err).To(BeNil())
+	Expect(resp).NotTo(BeNil())
+
+	By("Deleting release namespace")
+	namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ReleaseNamespace}}
+	Expect(k8sClient.Delete(context.TODO(), namespace)).NotTo(HaveOccurred())
+
+	By("Tearing down the test environment")
+	err = testEnv.Stop()
+	Expect(err).ToNot(HaveOccurred())
+})
diff --git a/test/e2e/volume_mount_test.go b/test/e2e/volume_mount_test.go
deleted file mode 100644
index 2bb78a501..000000000
--- a/test/e2e/volume_mount_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-Copyright 2018 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
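
One note on the suite above: installAction.Run(chart, nil) installs the chart with its default values, so the e2e run exercises whatever operator image the chart defaults to. A sketch of passing overrides instead (the value keys here are illustrative, not taken from this patch):

package e2e_test

import (
	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/release"
)

// installWithOverrides shows how value overrides would be threaded through
// the same Helm SDK call, e.g. to pin the image under test.
func installWithOverrides(install *action.Install, ch *chart.Chart) (*release.Release, error) {
	vals := map[string]interface{}{
		"image": map[string]interface{}{
			"repository": "docker.io/kubeflow/spark-operator",
			"tag":        "local",
		},
	}
	return install.Run(ch, vals)
}
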
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This integration test verifies that a volume can be successfully -// mounted in the driver and executor pods. - -package e2e - -import ( - "regexp" - "testing" - - "github.com/stretchr/testify/assert" - - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/kubectl/pkg/describe" - - appFramework "github.com/kubeflow/spark-operator/test/e2e/framework" -) - -type describeClient struct { - T *testing.T - Namespace string - Err error - kubernetes.Interface -} - -func TestMountConfigMap(t *testing.T) { - appName := "spark-pi" - - sa, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi-configmap.yaml") - assert.Equal(t, nil, err) - - if appFramework.SparkTestNamespace != "" { - sa.ObjectMeta.Namespace = appFramework.SparkTestNamespace - } - - if appFramework.SparkTestServiceAccount != "" { - sa.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount - } - - if appFramework.SparkTestImage != "" { - sa.Spec.Image = &appFramework.SparkTestImage - } - - _, err = appFramework.CreateConfigMap(framework.KubeClient, "dummy-cm", appFramework.SparkTestNamespace) - assert.Equal(t, nil, err) - - err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, sa) - assert.Equal(t, nil, err) - - status := GetJobStatus(t, appName) - err = wait.Poll(INTERVAL, TIMEOUT, func() (done bool, err error) { - if status == "RUNNING" { - return true, nil - } - status = GetJobStatus(t, appName) - return false, nil - }) - assert.Equal(t, nil, err) - - app, err := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) - podName := app.Status.DriverInfo.PodName - - describeClient := &describeClient{T: t, Namespace: appFramework.SparkTestNamespace, Interface: framework.KubeClient} - describer := describe.PodDescriber{Interface: describeClient} - - podDesc, err := describer.Describe(appFramework.SparkTestNamespace, podName, describe.DescriberSettings{ShowEvents: true}) - assert.Equal(t, nil, err) - - matched, err := regexp.MatchString(`dummy-cm`, podDesc) - assert.Equal(t, true, matched) - assert.Equal(t, nil, err) - - err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) -} diff --git a/version.go b/version.go new file mode 100644 index 000000000..e08232a51 --- /dev/null +++ b/version.go @@ -0,0 +1,90 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package sparkoperator
+
+import (
+	"fmt"
+	"runtime"
+)
+
+type VersionInfo struct {
+	Version      string
+	BuildDate    string
+	GitCommit    string
+	GitTag       string
+	GitTreeState string
+	GoVersion    string
+	Compiler     string
+	Platform     string
+}
+
+var (
+	version      = "0.0.0"                // value from VERSION file
+	buildDate    = "1970-01-01T00:00:00Z" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'`
+	gitCommit    = ""                     // output from `git rev-parse HEAD`
+	gitTag       = ""                     // output from `git describe --exact-match --tags HEAD` (if clean tree state)
+	gitTreeState = ""                     // determined from `git status --porcelain`; either 'clean' or 'dirty'
+)
+
+func getVersion() VersionInfo {
+	var versionStr string
+	if gitCommit != "" && gitTag != "" && gitTreeState == "clean" {
+		// If the tree state is clean and the current commit is tagged,
+		// this is an official release.
+		versionStr = gitTag
+	} else {
+		// Otherwise, formulate a version string from as much metadata as
+		// is available.
+		versionStr = version
+		if len(gitCommit) >= 7 {
+			versionStr += "+" + gitCommit[0:7]
+			if gitTreeState != "clean" {
+				versionStr += ".dirty"
+			}
+		} else {
+			versionStr += "+unknown"
+		}
+	}
+	return VersionInfo{
+		Version:      versionStr,
+		BuildDate:    buildDate,
+		GitCommit:    gitCommit,
+		GitTag:       gitTag,
+		GitTreeState: gitTreeState,
+		GoVersion:    runtime.Version(),
+		Compiler:     runtime.Compiler,
+		Platform:     fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+	}
+}
+
+// PrintVersion prints version information; when short is true, only the
+// version string itself is printed.
+func PrintVersion(short bool) {
+	v := getVersion()
+	fmt.Printf("Spark Operator Version: %s\n", v.Version)
+	if short {
+		return
+	}
+	fmt.Printf("Build Date: %s\n", v.BuildDate)
+	fmt.Printf("Git Commit ID: %s\n", v.GitCommit)
+	if v.GitTag != "" {
+		fmt.Printf("Git Tag: %s\n", v.GitTag)
+	}
+	fmt.Printf("Git Tree State: %s\n", v.GitTreeState)
+	fmt.Printf("Go Version: %s\n", v.GoVersion)
+	fmt.Printf("Compiler: %s\n", v.Compiler)
+	fmt.Printf("Platform: %s\n", v.Platform)
+}
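
The package-level vars above are placeholders meant to be overwritten at build time, typically with -ldflags "-X" entries generated by the Makefile (the exact wiring is outside this hunk). A minimal sketch of a test pinning down the fallback formatting, with the flag path shown only as an assumption:

package sparkoperator

import "testing"

// TestGetVersionFallback exercises the non-release branch: no tag, a known
// commit, and a dirty tree should yield "<version>+<short-commit>.dirty".
// In real builds these vars would be injected, e.g.
// -ldflags "-X github.com/kubeflow/spark-operator.gitCommit=$(GIT_COMMIT)"
// (module path assumed from the imports elsewhere in this patch).
func TestGetVersionFallback(t *testing.T) {
	version = "1.0.0"
	gitCommit = "0123456789abcdef"
	gitTag = ""
	gitTreeState = "dirty"
	if got, want := getVersion().Version, "1.0.0+0123456.dirty"; got != want {
		t.Fatalf("version string: got %q, want %q", got, want)
	}
}
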