From f2034e24e429adc5e80a0dc73b8eb2c0ebb87ad1 Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 3 Oct 2023 11:18:47 -0400 Subject: [PATCH 1/6] initial manifests --- .../opentelemetrycollector_webhook.go | 145 ++++++++++++------ .../opentelemetrycollector_webhook_test.go | 12 +- ...emetry-operator.clusterserviceversion.yaml | 14 +- controllers/builder_test.go | 33 ++-- .../opentelemetrycollector_controller.go | 1 + internal/manifests/collector/annotations.go | 7 +- .../manifests/collector/annotations_test.go | 15 +- internal/manifests/collector/collector.go | 19 +++ internal/manifests/collector/configmap.go | 22 +++ internal/manifests/collector/daemonset.go | 2 +- .../manifests/collector/daemonset_test.go | 21 +-- internal/manifests/collector/deployment.go | 2 +- .../manifests/collector/deployment_test.go | 21 +-- internal/manifests/collector/job.go | 79 ++++++++++ internal/manifests/collector/statefulset.go | 2 +- .../manifests/collector/statefulset_test.go | 21 +-- internal/manifests/collector/volume.go | 4 +- internal/manifests/collector/volume_test.go | 6 +- internal/manifests/mutate.go | 14 ++ internal/naming/main.go | 14 ++ main.go | 5 +- pkg/constants/env.go | 2 + tests/e2e/smoke-validation/00-assert.yaml | 34 ++++ tests/e2e/smoke-validation/00-install.yaml | 25 +++ tests/e2e/smoke-validation/01-assert.yaml | 54 +++++++ tests/e2e/smoke-validation/01-install.yaml | 27 ++++ 26 files changed, 477 insertions(+), 124 deletions(-) create mode 100644 internal/manifests/collector/job.go create mode 100644 tests/e2e/smoke-validation/00-assert.yaml create mode 100644 tests/e2e/smoke-validation/00-install.yaml create mode 100644 tests/e2e/smoke-validation/01-assert.yaml create mode 100644 tests/e2e/smoke-validation/01-install.yaml diff --git a/apis/v1alpha1/opentelemetrycollector_webhook.go b/apis/v1alpha1/opentelemetrycollector_webhook.go index 40b4df0506..b76f9f3bfc 100644 --- a/apis/v1alpha1/opentelemetrycollector_webhook.go +++ b/apis/v1alpha1/opentelemetrycollector_webhook.go @@ -15,12 +15,16 @@ package v1alpha1 import ( + "context" "fmt" + "net/http" + "github.com/go-logr/logr" + admissionv1 "k8s.io/api/admission/v1" autoscalingv2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -29,9 +33,65 @@ import ( "github.com/open-telemetry/opentelemetry-operator/pkg/featuregate" ) +const ( + validatingWebhookPath = "/validate-opentelemetry-io-v1alpha1-opentelemetrycollector" +) + // log is for logging in this package. 
var opentelemetrycollectorlog = logf.Log.WithName("opentelemetrycollector-resource") +type collectorValidatingWebhook struct { + client client.Client + decoder *admission.Decoder + logger logr.Logger +} + +func (c collectorValidatingWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + otelcol := &OpenTelemetryCollector{} + err := c.decoder.DecodeRaw(req.Object, otelcol) + if err != nil { + c.logger.Error(err, "failed to decode message") + return admission.Errored(http.StatusBadRequest, err) + } + c.logger.Info("validating operation", "operation", req.Operation, "name", otelcol.GetName()) + var warnings []string + switch req.Operation { + // passthrough connect requests + case admissionv1.Connect: + case admissionv1.Create: + warnings, err = otelcol.validateCRDSpec() + case admissionv1.Update: + old := &OpenTelemetryCollector{} + err = c.decoder.DecodeRaw(req.OldObject, old) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + warnings, err = otelcol.validateCRDSpec() + case admissionv1.Delete: + // req.OldObject contains the object being deleted + old := &OpenTelemetryCollector{} + err = c.decoder.DecodeRaw(req.OldObject, old) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + default: + return admission.Errored(http.StatusExpectationFailed, fmt.Errorf("unknown operator: %s", req.Operation)) + } + if err != nil { + return admission.Denied(err.Error()).WithWarnings(warnings...) + } + return admission.Allowed("").WithWarnings(warnings...) +} + +func RegisterCollectorValidatingWebhook(mgr ctrl.Manager) { + cvw := &collectorValidatingWebhook{ + client: mgr.GetClient(), + logger: mgr.GetLogger().WithValues("handler", "collectorValidatingWebhook"), + decoder: admission.NewDecoder(mgr.GetScheme()), + } + mgr.GetWebhookServer().Register(validatingWebhookPath, &webhook.Admission{Handler: cvw}) +} + func (r *OpenTelemetryCollector) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). For(r). 
@@ -39,6 +99,8 @@ func (r *OpenTelemetryCollector) SetupWebhookWithManager(mgr ctrl.Manager) error } // +kubebuilder:webhook:path=/mutate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=true,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,verbs=create;update,versions=v1alpha1,name=mopentelemetrycollector.kb.io,sideEffects=none,admissionReviewVersions=v1 +// +kubebuilder:webhook:verbs=create;update,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectorcreateupdate.kb.io,sideEffects=none,admissionReviewVersions=v1 +// +kubebuilder:webhook:verbs=delete,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=ignore,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectordelete.kb.io,sideEffects=none,admissionReviewVersions=v1 var _ webhook.Defaulter = &OpenTelemetryCollector{} @@ -99,72 +161,51 @@ func (r *OpenTelemetryCollector) Default() { } } -// +kubebuilder:webhook:verbs=create;update,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectorcreateupdate.kb.io,sideEffects=none,admissionReviewVersions=v1 -// +kubebuilder:webhook:verbs=delete,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=ignore,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectordelete.kb.io,sideEffects=none,admissionReviewVersions=v1 - -var _ webhook.Validator = &OpenTelemetryCollector{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (r *OpenTelemetryCollector) ValidateCreate() (admission.Warnings, error) { - opentelemetrycollectorlog.Info("validate create", "name", r.Name) - return nil, r.validateCRDSpec() -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *OpenTelemetryCollector) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { - opentelemetrycollectorlog.Info("validate update", "name", r.Name) - return nil, r.validateCRDSpec() -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *OpenTelemetryCollector) ValidateDelete() (admission.Warnings, error) { - opentelemetrycollectorlog.Info("validate delete", "name", r.Name) - return nil, nil -} - -func (r *OpenTelemetryCollector) validateCRDSpec() error { +// validateCrdSpec adheres closely to the admission.Validate spec to allow the collector to validate its CRD spec. 
+func (r *OpenTelemetryCollector) validateCRDSpec() (admission.Warnings, error) { + warnings := admission.Warnings{} // validate volumeClaimTemplates if r.Spec.Mode != ModeStatefulSet && len(r.Spec.VolumeClaimTemplates) > 0 { - return fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'volumeClaimTemplates'", r.Spec.Mode) + return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'volumeClaimTemplates'", r.Spec.Mode) } // validate tolerations if r.Spec.Mode == ModeSidecar && len(r.Spec.Tolerations) > 0 { - return fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'tolerations'", r.Spec.Mode) + return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'tolerations'", r.Spec.Mode) } // validate priorityClassName if r.Spec.Mode == ModeSidecar && r.Spec.PriorityClassName != "" { - return fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'priorityClassName'", r.Spec.Mode) + return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'priorityClassName'", r.Spec.Mode) } // validate affinity if r.Spec.Mode == ModeSidecar && r.Spec.Affinity != nil { - return fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'affinity'", r.Spec.Mode) + return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'affinity'", r.Spec.Mode) } if r.Spec.Mode == ModeSidecar && len(r.Spec.AdditionalContainers) > 0 { - return fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'AdditionalContainers'", r.Spec.Mode) + return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'AdditionalContainers'", r.Spec.Mode) } // validate target allocation if r.Spec.TargetAllocator.Enabled && r.Spec.Mode != ModeStatefulSet { - return fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the target allocation deployment", r.Spec.Mode) + return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the target allocation deployment", r.Spec.Mode) } // validate Prometheus config for target allocation if r.Spec.TargetAllocator.Enabled { promCfg, err := ta.ConfigToPromConfig(r.Spec.Config) if err != nil { - return fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err) + return warnings, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err) } err = ta.ValidatePromConfig(promCfg, r.Spec.TargetAllocator.Enabled, featuregate.EnableTargetAllocatorRewrite.IsEnabled()) if err != nil { - return fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err) + return warnings, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err) } err = ta.ValidateTargetAllocatorConfig(r.Spec.TargetAllocator.PrometheusCR.Enabled, promCfg) if err != nil { - return fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err) + return warnings, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err) } } @@ -173,7 +214,7 @@ func (r *OpenTelemetryCollector) validateCRDSpec() error { nameErrs := validation.IsValidPortName(p.Name) numErrs := 
validation.IsValidPortNum(int(p.Port)) if len(nameErrs) > 0 || len(numErrs) > 0 { - return fmt.Errorf("the OpenTelemetry Spec Ports configuration is incorrect, port name '%s' errors: %s, num '%d' errors: %s", + return warnings, fmt.Errorf("the OpenTelemetry Spec Ports configuration is incorrect, port name '%s' errors: %s, num '%d' errors: %s", p.Name, nameErrs, p.Port, numErrs) } } @@ -184,7 +225,8 @@ func (r *OpenTelemetryCollector) validateCRDSpec() error { } // check deprecated .Spec.MaxReplicas if maxReplicas is not set - if *maxReplicas == 0 { + if *maxReplicas == 0 && r.Spec.MaxReplicas != nil { + warnings = append(warnings, "MaxReplicas is deprecated") maxReplicas = r.Spec.MaxReplicas } @@ -196,6 +238,7 @@ func (r *OpenTelemetryCollector) validateCRDSpec() error { // check deprecated .Spec.MinReplicas if minReplicas is not set if *minReplicas == 0 { if r.Spec.MinReplicas != nil { + warnings = append(warnings, "MinReplicas is deprecated") minReplicas = r.Spec.MinReplicas } else { minReplicas = r.Spec.Replicas @@ -203,7 +246,7 @@ func (r *OpenTelemetryCollector) validateCRDSpec() error { } if r.Spec.Ingress.Type == IngressTypeNginx && r.Spec.Mode == ModeSidecar { - return fmt.Errorf("the OpenTelemetry Spec Ingress configuration is incorrect. Ingress can only be used in combination with the modes: %s, %s, %s", + return warnings, fmt.Errorf("the OpenTelemetry Spec Ingress configuration is incorrect. Ingress can only be used in combination with the modes: %s, %s, %s", ModeDeployment, ModeDaemonSet, ModeStatefulSet, ) } @@ -211,57 +254,57 @@ func (r *OpenTelemetryCollector) validateCRDSpec() error { // validate autoscale with horizontal pod autoscaler if maxReplicas != nil { if *maxReplicas < int32(1) { - return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, maxReplicas should be defined and one or more") + return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, maxReplicas should be defined and one or more") } if r.Spec.Replicas != nil && *r.Spec.Replicas > *maxReplicas { - return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, replicas must not be greater than maxReplicas") + return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, replicas must not be greater than maxReplicas") } if minReplicas != nil && *minReplicas > *maxReplicas { - return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas must not be greater than maxReplicas") + return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas must not be greater than maxReplicas") } if minReplicas != nil && *minReplicas < int32(1) { - return fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas should be one or more") + return warnings, fmt.Errorf("the OpenTelemetry Spec autoscale configuration is incorrect, minReplicas should be one or more") } if r.Spec.Autoscaler != nil { - return checkAutoscalerSpec(r.Spec.Autoscaler) + return warnings, checkAutoscalerSpec(r.Spec.Autoscaler) } } if r.Spec.Ingress.Type == IngressTypeNginx && r.Spec.Mode == ModeSidecar { - return fmt.Errorf("the OpenTelemetry Spec Ingress configuiration is incorrect. Ingress can only be used in combination with the modes: %s, %s, %s", + return warnings, fmt.Errorf("the OpenTelemetry Spec Ingress configuiration is incorrect. 
Ingress can only be used in combination with the modes: %s, %s, %s", ModeDeployment, ModeDaemonSet, ModeStatefulSet, ) } if r.Spec.Ingress.RuleType == IngressRuleTypeSubdomain && (r.Spec.Ingress.Hostname == "" || r.Spec.Ingress.Hostname == "*") { - return fmt.Errorf("a valid Ingress hostname has to be defined for subdomain ruleType") + return warnings, fmt.Errorf("a valid Ingress hostname has to be defined for subdomain ruleType") } if r.Spec.LivenessProbe != nil { if r.Spec.LivenessProbe.InitialDelaySeconds != nil && *r.Spec.LivenessProbe.InitialDelaySeconds < 0 { - return fmt.Errorf("the OpenTelemetry Spec LivenessProbe InitialDelaySeconds configuration is incorrect. InitialDelaySeconds should be greater than or equal to 0") + return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe InitialDelaySeconds configuration is incorrect. InitialDelaySeconds should be greater than or equal to 0") } if r.Spec.LivenessProbe.PeriodSeconds != nil && *r.Spec.LivenessProbe.PeriodSeconds < 1 { - return fmt.Errorf("the OpenTelemetry Spec LivenessProbe PeriodSeconds configuration is incorrect. PeriodSeconds should be greater than or equal to 1") + return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe PeriodSeconds configuration is incorrect. PeriodSeconds should be greater than or equal to 1") } if r.Spec.LivenessProbe.TimeoutSeconds != nil && *r.Spec.LivenessProbe.TimeoutSeconds < 1 { - return fmt.Errorf("the OpenTelemetry Spec LivenessProbe TimeoutSeconds configuration is incorrect. TimeoutSeconds should be greater than or equal to 1") + return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe TimeoutSeconds configuration is incorrect. TimeoutSeconds should be greater than or equal to 1") } if r.Spec.LivenessProbe.SuccessThreshold != nil && *r.Spec.LivenessProbe.SuccessThreshold < 1 { - return fmt.Errorf("the OpenTelemetry Spec LivenessProbe SuccessThreshold configuration is incorrect. SuccessThreshold should be greater than or equal to 1") + return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe SuccessThreshold configuration is incorrect. SuccessThreshold should be greater than or equal to 1") } if r.Spec.LivenessProbe.FailureThreshold != nil && *r.Spec.LivenessProbe.FailureThreshold < 1 { - return fmt.Errorf("the OpenTelemetry Spec LivenessProbe FailureThreshold configuration is incorrect. FailureThreshold should be greater than or equal to 1") + return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe FailureThreshold configuration is incorrect. FailureThreshold should be greater than or equal to 1") } if r.Spec.LivenessProbe.TerminationGracePeriodSeconds != nil && *r.Spec.LivenessProbe.TerminationGracePeriodSeconds < 1 { - return fmt.Errorf("the OpenTelemetry Spec LivenessProbe TerminationGracePeriodSeconds configuration is incorrect. TerminationGracePeriodSeconds should be greater than or equal to 1") + return warnings, fmt.Errorf("the OpenTelemetry Spec LivenessProbe TerminationGracePeriodSeconds configuration is incorrect. 
TerminationGracePeriodSeconds should be greater than or equal to 1") } } - return nil + return warnings, nil } func checkAutoscalerSpec(autoscaler *AutoscalerSpec) error { diff --git a/apis/v1alpha1/opentelemetrycollector_webhook_test.go b/apis/v1alpha1/opentelemetrycollector_webhook_test.go index ea44351e77..ac06f48a5a 100644 --- a/apis/v1alpha1/opentelemetrycollector_webhook_test.go +++ b/apis/v1alpha1/opentelemetrycollector_webhook_test.go @@ -180,9 +180,10 @@ func TestOTELColValidatingWebhook(t *testing.T) { five := int32(5) tests := []struct { //nolint:govet - name string - otelcol OpenTelemetryCollector - expectedErr string + name string + otelcol OpenTelemetryCollector + expectedErr string + expectedWarnings []string }{ { name: "valid empty spec", @@ -634,11 +635,14 @@ func TestOTELColValidatingWebhook(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - err := test.otelcol.validateCRDSpec() + warnings, err := test.otelcol.validateCRDSpec() if test.expectedErr == "" { assert.NoError(t, err) return } + if len(test.expectedWarnings) == 0 { + assert.Empty(t, warnings, test.expectedWarnings) + } assert.ErrorContains(t, err, test.expectedErr) }) } diff --git a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml index 4486b92ffc..497fc6ed3c 100644 --- a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml +++ b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml @@ -31,7 +31,7 @@ metadata: categories: Logging & Tracing,Monitoring certified: "false" containerImage: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator - createdAt: "2023-09-20T15:05:15Z" + createdAt: "2023-09-29T16:46:40Z" description: Provides the OpenTelemetry components, including the Collector operators.operatorframework.io/builder: operator-sdk-v1.29.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 @@ -168,6 +168,18 @@ spec: - patch - update - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - coordination.k8s.io resources: diff --git a/controllers/builder_test.go b/controllers/builder_test.go index f93b31bd8c..b63923b1d4 100644 --- a/controllers/builder_test.go +++ b/controllers/builder_test.go @@ -28,6 +28,7 @@ import ( "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" "github.com/open-telemetry/opentelemetry-operator/internal/config" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" + "github.com/open-telemetry/opentelemetry-operator/pkg/constants" ) var ( @@ -92,10 +93,10 @@ service: "app.kubernetes.io/version": "latest", }, Annotations: map[string]string{ - "opentelemetry-operator-config/sha256": "baf97852b8beb44fb46a120f8c31873ded3129088e50cd6c69f3208ba60bd661", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + constants.CollectorConfigSHA: "baf97852b8beb44fb46a120f8c31873ded3129088e50cd6c69f3208ba60bd661", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", }, }, Spec: appsv1.DeploymentSpec{ @@ -114,10 +115,10 @@ service: "app.kubernetes.io/version": "latest", }, Annotations: map[string]string{ - "opentelemetry-operator-config/sha256": "baf97852b8beb44fb46a120f8c31873ded3129088e50cd6c69f3208ba60bd661", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + 
constants.CollectorConfigSHA: "baf97852b8beb44fb46a120f8c31873ded3129088e50cd6c69f3208ba60bd661", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", }, }, Spec: corev1.PodSpec{ @@ -335,10 +336,10 @@ service: "app.kubernetes.io/version": "latest", }, Annotations: map[string]string{ - "opentelemetry-operator-config/sha256": "baf97852b8beb44fb46a120f8c31873ded3129088e50cd6c69f3208ba60bd661", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + constants.CollectorConfigSHA: "baf97852b8beb44fb46a120f8c31873ded3129088e50cd6c69f3208ba60bd661", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", }, }, Spec: appsv1.DeploymentSpec{ @@ -357,10 +358,10 @@ service: "app.kubernetes.io/version": "latest", }, Annotations: map[string]string{ - "opentelemetry-operator-config/sha256": "baf97852b8beb44fb46a120f8c31873ded3129088e50cd6c69f3208ba60bd661", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + constants.CollectorConfigSHA: "baf97852b8beb44fb46a120f8c31873ded3129088e50cd6c69f3208ba60bd661", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", }, }, Spec: corev1.PodSpec{ diff --git a/controllers/opentelemetrycollector_controller.go b/controllers/opentelemetrycollector_controller.go index ddd3d51bda..1b4397eca4 100644 --- a/controllers/opentelemetrycollector_controller.go +++ b/controllers/opentelemetrycollector_controller.go @@ -148,6 +148,7 @@ func NewReconciler(p Params) *OpenTelemetryCollectorReconciler { // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch // +kubebuilder:rbac:groups=apps,resources=daemonsets;deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete diff --git a/internal/manifests/collector/annotations.go b/internal/manifests/collector/annotations.go index 95d25f71d6..e967b5bbd0 100644 --- a/internal/manifests/collector/annotations.go +++ b/internal/manifests/collector/annotations.go @@ -19,6 +19,7 @@ import ( "fmt" "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" + "github.com/open-telemetry/opentelemetry-operator/pkg/constants" ) // Annotations return the annotations for OpenTelemetryCollector pod. 
@@ -38,7 +39,7 @@ func Annotations(instance v1alpha1.OpenTelemetryCollector) map[string]string { } } // make sure sha256 for configMap is always calculated - annotations["opentelemetry-operator-config/sha256"] = getConfigMapSHA(instance.Spec.Config) + annotations[constants.CollectorConfigSHA] = GetConfigMapSHA(instance.Spec.Config) return annotations } @@ -61,12 +62,12 @@ func PodAnnotations(instance v1alpha1.OpenTelemetryCollector) map[string]string } // make sure sha256 for configMap is always calculated - podAnnotations["opentelemetry-operator-config/sha256"] = getConfigMapSHA(instance.Spec.Config) + podAnnotations[constants.CollectorConfigSHA] = GetConfigMapSHA(instance.Spec.Config) return podAnnotations } -func getConfigMapSHA(config string) string { +func GetConfigMapSHA(config string) string { h := sha256.Sum256([]byte(config)) return fmt.Sprintf("%x", h) } diff --git a/internal/manifests/collector/annotations_test.go b/internal/manifests/collector/annotations_test.go index ade8f7fbab..a814004754 100644 --- a/internal/manifests/collector/annotations_test.go +++ b/internal/manifests/collector/annotations_test.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" + "github.com/open-telemetry/opentelemetry-operator/pkg/constants" ) func TestDefaultAnnotations(t *testing.T) { @@ -43,12 +44,12 @@ func TestDefaultAnnotations(t *testing.T) { assert.Equal(t, "true", annotations["prometheus.io/scrape"]) assert.Equal(t, "8888", annotations["prometheus.io/port"]) assert.Equal(t, "/metrics", annotations["prometheus.io/path"]) - assert.Equal(t, "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", annotations["opentelemetry-operator-config/sha256"]) + assert.Equal(t, "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", annotations[constants.CollectorConfigSHA]) //verify propagation from metadata.annotations to spec.template.spec.metadata.annotations assert.Equal(t, "true", podAnnotations["prometheus.io/scrape"]) assert.Equal(t, "8888", podAnnotations["prometheus.io/port"]) assert.Equal(t, "/metrics", podAnnotations["prometheus.io/path"]) - assert.Equal(t, "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", podAnnotations["opentelemetry-operator-config/sha256"]) + assert.Equal(t, "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", podAnnotations[constants.CollectorConfigSHA]) } func TestUserAnnotations(t *testing.T) { @@ -58,9 +59,9 @@ func TestUserAnnotations(t *testing.T) { Name: "my-instance", Namespace: "my-ns", Annotations: map[string]string{"prometheus.io/scrape": "false", - "prometheus.io/port": "1234", - "prometheus.io/path": "/test", - "opentelemetry-operator-config/sha256": "shouldBeOverwritten", + "prometheus.io/port": "1234", + "prometheus.io/path": "/test", + constants.CollectorConfigSHA: "shouldBeOverwritten", }, }, Spec: v1alpha1.OpenTelemetryCollectorSpec{ @@ -76,8 +77,8 @@ func TestUserAnnotations(t *testing.T) { assert.Equal(t, "false", annotations["prometheus.io/scrape"]) assert.Equal(t, "1234", annotations["prometheus.io/port"]) assert.Equal(t, "/test", annotations["prometheus.io/path"]) - assert.Equal(t, "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", annotations["opentelemetry-operator-config/sha256"]) - assert.Equal(t, "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", podAnnotations["opentelemetry-operator-config/sha256"]) + assert.Equal(t, 
"9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", annotations[constants.CollectorConfigSHA]) + assert.Equal(t, "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", podAnnotations[constants.CollectorConfigSHA]) } func TestAnnotationsPropagateDown(t *testing.T) { diff --git a/internal/manifests/collector/collector.go b/internal/manifests/collector/collector.go index 1e64eb892b..29df46e2c6 100644 --- a/internal/manifests/collector/collector.go +++ b/internal/manifests/collector/collector.go @@ -22,6 +22,25 @@ import ( "github.com/open-telemetry/opentelemetry-operator/pkg/featuregate" ) +func BuildValidation(params manifests.Params) ([]client.Object, error) { + var resourceManifests []client.Object + var manifestFactories []manifests.K8sManifestFactory + manifestFactories = append(manifestFactories, []manifests.K8sManifestFactory{ + manifests.FactoryWithoutError(Job), + manifests.FactoryWithoutError(VersionedConfigMap), + manifests.FactoryWithoutError(ServiceAccount), + }...) + for _, factory := range manifestFactories { + res, err := factory(params) + if err != nil { + return nil, err + } else if manifests.ObjectIsNotNil(res) { + resourceManifests = append(resourceManifests, res) + } + } + return resourceManifests, nil +} + // Build creates the manifest for the collector resource. func Build(params manifests.Params) ([]client.Object, error) { var resourceManifests []client.Object diff --git a/internal/manifests/collector/configmap.go b/internal/manifests/collector/configmap.go index cdf2bdb7c8..69bafdc7ca 100644 --- a/internal/manifests/collector/configmap.go +++ b/internal/manifests/collector/configmap.go @@ -22,6 +22,28 @@ import ( "github.com/open-telemetry/opentelemetry-operator/internal/naming" ) +func VersionedConfigMap(params manifests.Params) *corev1.ConfigMap { + name := naming.VersionedConfigMap(params.OtelCol.Name, GetConfigMapSHA(params.OtelCol.Spec.Config)) + labels := Labels(params.OtelCol, name, []string{}) + + replacedConf, err := ReplaceConfig(params.OtelCol) + if err != nil { + params.Log.V(2).Info("failed to update prometheus config to use sharded targets: ", "err", err) + } + + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: params.OtelCol.Namespace, + Labels: labels, + Annotations: params.OtelCol.Annotations, + }, + Data: map[string]string{ + "collector.yaml": replacedConf, + }, + } +} + func ConfigMap(params manifests.Params) *corev1.ConfigMap { name := naming.ConfigMap(params.OtelCol.Name) labels := Labels(params.OtelCol, name, []string{}) diff --git a/internal/manifests/collector/daemonset.go b/internal/manifests/collector/daemonset.go index ab663d8acb..bb7029ecda 100644 --- a/internal/manifests/collector/daemonset.go +++ b/internal/manifests/collector/daemonset.go @@ -50,7 +50,7 @@ func DaemonSet(params manifests.Params) *appsv1.DaemonSet { ServiceAccountName: ServiceAccountName(params.OtelCol), InitContainers: params.OtelCol.Spec.InitContainers, Containers: append(params.OtelCol.Spec.AdditionalContainers, Container(params.Config, params.Log, params.OtelCol, true)), - Volumes: Volumes(params.Config, params.OtelCol), + Volumes: Volumes(params.Config, params.OtelCol, naming.ConfigMap(params.OtelCol.Name)), Tolerations: params.OtelCol.Spec.Tolerations, NodeSelector: params.OtelCol.Spec.NodeSelector, HostNetwork: params.OtelCol.Spec.HostNetwork, diff --git a/internal/manifests/collector/daemonset_test.go b/internal/manifests/collector/daemonset_test.go index 0a6dde9ec5..4ed2e9971a 100644 --- 
a/internal/manifests/collector/daemonset_test.go +++ b/internal/manifests/collector/daemonset_test.go @@ -25,6 +25,7 @@ import ( "github.com/open-telemetry/opentelemetry-operator/internal/config" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" . "github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector" + "github.com/open-telemetry/opentelemetry-operator/pkg/constants" ) func TestDaemonSetNewDefault(t *testing.T) { @@ -58,10 +59,10 @@ func TestDaemonSetNewDefault(t *testing.T) { // verify sha256 podAnnotation expectedAnnotations := map[string]string{ - "opentelemetry-operator-config/sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + constants.CollectorConfigSHA: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", } assert.Equal(t, expectedAnnotations, d.Spec.Template.Annotations) @@ -148,14 +149,14 @@ func TestDaemonsetPodAnnotations(t *testing.T) { ds := DaemonSet(params) // Add sha256 podAnnotation - testPodAnnotationValues["opentelemetry-operator-config/sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + testPodAnnotationValues[constants.CollectorConfigSHA] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" expectedAnnotations := map[string]string{ - "annotation-key": "annotation-value", - "opentelemetry-operator-config/sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + "annotation-key": "annotation-value", + constants.CollectorConfigSHA: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", } // verify diff --git a/internal/manifests/collector/deployment.go b/internal/manifests/collector/deployment.go index d186819367..d77c1ae0cb 100644 --- a/internal/manifests/collector/deployment.go +++ b/internal/manifests/collector/deployment.go @@ -52,7 +52,7 @@ func Deployment(params manifests.Params) *appsv1.Deployment { ServiceAccountName: ServiceAccountName(params.OtelCol), InitContainers: params.OtelCol.Spec.InitContainers, Containers: append(params.OtelCol.Spec.AdditionalContainers, Container(params.Config, params.Log, params.OtelCol, true)), - Volumes: Volumes(params.Config, params.OtelCol), + Volumes: Volumes(params.Config, params.OtelCol, naming.ConfigMap(params.OtelCol.Name)), DNSPolicy: getDNSPolicy(params.OtelCol), HostNetwork: params.OtelCol.Spec.HostNetwork, Tolerations: params.OtelCol.Spec.Tolerations, diff --git a/internal/manifests/collector/deployment_test.go b/internal/manifests/collector/deployment_test.go index baa66f42ad..7de2b52cca 100644 --- a/internal/manifests/collector/deployment_test.go +++ b/internal/manifests/collector/deployment_test.go @@ -25,6 +25,7 @@ import ( "github.com/open-telemetry/opentelemetry-operator/internal/config" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" . 
"github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector" + "github.com/open-telemetry/opentelemetry-operator/pkg/constants" ) var testTolerationValues = []v1.Toleration{ @@ -100,10 +101,10 @@ func TestDeploymentNewDefault(t *testing.T) { // verify sha256 podAnnotation expectedAnnotations := map[string]string{ - "opentelemetry-operator-config/sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + constants.CollectorConfigSHA: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", } assert.Equal(t, expectedAnnotations, d.Spec.Template.Annotations) @@ -154,14 +155,14 @@ func TestDeploymentPodAnnotations(t *testing.T) { d := Deployment(params) // Add sha256 podAnnotation - testPodAnnotationValues["opentelemetry-operator-config/sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + testPodAnnotationValues[constants.CollectorConfigSHA] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" expectedPodAnnotationValues := map[string]string{ - "annotation-key": "annotation-value", - "opentelemetry-operator-config/sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + "annotation-key": "annotation-value", + constants.CollectorConfigSHA: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", } // verify diff --git a/internal/manifests/collector/job.go b/internal/manifests/collector/job.go new file mode 100644 index 0000000000..5d0e08da7a --- /dev/null +++ b/internal/manifests/collector/job.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/open-telemetry/opentelemetry-operator/internal/manifests" + "github.com/open-telemetry/opentelemetry-operator/internal/naming" +) + +var ( + // backoffLimit is set to one because we don't need to retry this job, it either fails or succeeds. 
+ backoffLimit int32 = 1 +) + +func Job(params manifests.Params) *batchv1.Job { + confMapSha := GetConfigMapSHA(params.OtelCol.Spec.Config) + name := naming.Job(params.OtelCol.Name, confMapSha) + labels := Labels(params.OtelCol, name, params.Config.LabelsFilter()) + + annotations := Annotations(params.OtelCol) + podAnnotations := PodAnnotations(params.OtelCol) + // manualSelector is explicitly false because we don't want to cause a potential conflict between the job + // and the replicaset + manualSelector := false + + c := Container(params.Config, params.Log, params.OtelCol, true) + c.Args = append([]string{"validate"}, c.Args...) + + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: params.OtelCol.Namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: batchv1.JobSpec{ + ManualSelector: &manualSelector, + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: podAnnotations, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + ServiceAccountName: ServiceAccountName(params.OtelCol), + InitContainers: params.OtelCol.Spec.InitContainers, + Containers: append(params.OtelCol.Spec.AdditionalContainers, c), + Volumes: Volumes(params.Config, params.OtelCol, naming.VersionedConfigMap(params.OtelCol.Name, confMapSha)), + DNSPolicy: getDNSPolicy(params.OtelCol), + HostNetwork: params.OtelCol.Spec.HostNetwork, + Tolerations: params.OtelCol.Spec.Tolerations, + NodeSelector: params.OtelCol.Spec.NodeSelector, + SecurityContext: params.OtelCol.Spec.PodSecurityContext, + PriorityClassName: params.OtelCol.Spec.PriorityClassName, + Affinity: params.OtelCol.Spec.Affinity, + TerminationGracePeriodSeconds: params.OtelCol.Spec.TerminationGracePeriodSeconds, + TopologySpreadConstraints: params.OtelCol.Spec.TopologySpreadConstraints, + }, + }, + }, + } +} diff --git a/internal/manifests/collector/statefulset.go b/internal/manifests/collector/statefulset.go index 85afb33cc9..82f7de22a3 100644 --- a/internal/manifests/collector/statefulset.go +++ b/internal/manifests/collector/statefulset.go @@ -52,7 +52,7 @@ func StatefulSet(params manifests.Params) *appsv1.StatefulSet { ServiceAccountName: ServiceAccountName(params.OtelCol), InitContainers: params.OtelCol.Spec.InitContainers, Containers: append(params.OtelCol.Spec.AdditionalContainers, Container(params.Config, params.Log, params.OtelCol, true)), - Volumes: Volumes(params.Config, params.OtelCol), + Volumes: Volumes(params.Config, params.OtelCol, naming.ConfigMap(params.OtelCol.Name)), DNSPolicy: getDNSPolicy(params.OtelCol), HostNetwork: params.OtelCol.Spec.HostNetwork, Tolerations: params.OtelCol.Spec.Tolerations, diff --git a/internal/manifests/collector/statefulset_test.go b/internal/manifests/collector/statefulset_test.go index 332399ed7c..1385450837 100644 --- a/internal/manifests/collector/statefulset_test.go +++ b/internal/manifests/collector/statefulset_test.go @@ -28,6 +28,7 @@ import ( "github.com/open-telemetry/opentelemetry-operator/internal/config" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" . 
"github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector" + "github.com/open-telemetry/opentelemetry-operator/pkg/constants" ) func TestStatefulSetNewDefault(t *testing.T) { @@ -65,10 +66,10 @@ func TestStatefulSetNewDefault(t *testing.T) { // verify sha256 podAnnotation expectedAnnotations := map[string]string{ - "opentelemetry-operator-config/sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + constants.CollectorConfigSHA: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", } assert.Equal(t, expectedAnnotations, ss.Spec.Template.Annotations) @@ -194,14 +195,14 @@ func TestStatefulSetPodAnnotations(t *testing.T) { ss := StatefulSet(params) // Add sha256 podAnnotation - testPodAnnotationValues["opentelemetry-operator-config/sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + testPodAnnotationValues[constants.CollectorConfigSHA] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" expectedAnnotations := map[string]string{ - "annotation-key": "annotation-value", - "opentelemetry-operator-config/sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "prometheus.io/path": "/metrics", - "prometheus.io/port": "8888", - "prometheus.io/scrape": "true", + "annotation-key": "annotation-value", + constants.CollectorConfigSHA: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "prometheus.io/path": "/metrics", + "prometheus.io/port": "8888", + "prometheus.io/scrape": "true", } // verify assert.Equal(t, "my-instance-collector", ss.Name) diff --git a/internal/manifests/collector/volume.go b/internal/manifests/collector/volume.go index 6b014eba80..4a1c01bdfd 100644 --- a/internal/manifests/collector/volume.go +++ b/internal/manifests/collector/volume.go @@ -24,12 +24,12 @@ import ( ) // Volumes builds the volumes for the given instance, including the config map volume. 
-func Volumes(cfg config.Config, otelcol v1alpha1.OpenTelemetryCollector) []corev1.Volume { +func Volumes(cfg config.Config, otelcol v1alpha1.OpenTelemetryCollector, configMap string) []corev1.Volume { volumes := []corev1.Volume{{ Name: naming.ConfigMapVolume(), VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: naming.ConfigMap(otelcol.Name)}, + LocalObjectReference: corev1.LocalObjectReference{Name: configMap}, Items: []corev1.KeyToPath{{ Key: cfg.CollectorConfigMapEntry(), Path: cfg.CollectorConfigMapEntry(), diff --git a/internal/manifests/collector/volume_test.go b/internal/manifests/collector/volume_test.go index 6232b39c4b..3e0d6d7155 100644 --- a/internal/manifests/collector/volume_test.go +++ b/internal/manifests/collector/volume_test.go @@ -32,7 +32,7 @@ func TestVolumeNewDefault(t *testing.T) { cfg := config.New() // test - volumes := Volumes(cfg, otelcol) + volumes := Volumes(cfg, otelcol, naming.ConfigMap(otelcol.Name)) // verify assert.Len(t, volumes, 1) @@ -53,7 +53,7 @@ func TestVolumeAllowsMoreToBeAdded(t *testing.T) { cfg := config.New() // test - volumes := Volumes(cfg, otelcol) + volumes := Volumes(cfg, otelcol, naming.ConfigMap(otelcol.Name)) // verify assert.Len(t, volumes, 2) @@ -78,7 +78,7 @@ func TestVolumeWithMoreConfigMaps(t *testing.T) { cfg := config.New() // test - volumes := Volumes(cfg, otelcol) + volumes := Volumes(cfg, otelcol, naming.ConfigMap(otelcol.Name)) // verify assert.Len(t, volumes, 3) diff --git a/internal/manifests/mutate.go b/internal/manifests/mutate.go index b11513f312..f8bc71fb1b 100644 --- a/internal/manifests/mutate.go +++ b/internal/manifests/mutate.go @@ -25,6 +25,7 @@ import ( appsv1 "k8s.io/api/apps/v1" autoscalingv2 "k8s.io/api/autoscaling/v2" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -116,6 +117,11 @@ func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn { wantRb := desired.(*rbacv1.RoleBinding) mutateRoleBinding(rb, wantRb) + case *batchv1.Job: + dpl := existing.(*batchv1.Job) + wantDpl := desired.(*batchv1.Job) + return mutateJob(dpl, wantDpl) + case *appsv1.Deployment: dpl := existing.(*appsv1.Deployment) wantDpl := desired.(*appsv1.Deployment) @@ -269,6 +275,14 @@ func mutateDaemonset(existing, desired *appsv1.DaemonSet) error { return nil } +func mutateJob(existing, desired *batchv1.Job) error { + // We specify that we DO NOT supply a selector, therefore we should never override the given selector + if err := mergeWithOverride(&existing.Spec.Template, desired.Spec.Template); err != nil { + return err + } + return nil +} + func mutateDeployment(existing, desired *appsv1.Deployment) error { if !existing.CreationTimestamp.IsZero() && !apiequality.Semantic.DeepEqual(desired.Spec.Selector, existing.Spec.Selector) { return ImmutableChangeErr diff --git a/internal/naming/main.go b/internal/naming/main.go index b1ed7dd220..5528c381cb 100644 --- a/internal/naming/main.go +++ b/internal/naming/main.go @@ -15,11 +15,20 @@ // Package naming is for determining the names for components (containers, services, ...). package naming +func truncatedHash(configHash string) string { + return Truncate("%s", 16, configHash) +} + // ConfigMap builds the name for the config map used in the OpenTelemetryCollector containers. 
func ConfigMap(otelcol string) string { return DNSName(Truncate("%s-collector", 63, otelcol)) } +// VersionedConfigMap builds the name for the config map and hash used in the OpenTelemetryCollector containers. +func VersionedConfigMap(otelcol string, configHash string) string { + return DNSName(Truncate("%s-collector-%s", 63, otelcol, truncatedHash(configHash))) +} + // TAConfigMap returns the name for the config map used in the TargetAllocator. func TAConfigMap(otelcol string) string { return DNSName(Truncate("%s-targetallocator", 63, otelcol)) @@ -50,6 +59,11 @@ func TAContainer() string { return "ta-container" } +// Job builds the name of the job using the config hash. +func Job(otelcol string, configHash string) string { + return DNSName(Truncate("%s-collector-%s", 63, otelcol, truncatedHash(configHash))) +} + // Collector builds the collector (deployment/daemonset) name based on the instance. func Collector(otelcol string) string { return DNSName(Truncate("%s-collector", 63, otelcol)) diff --git a/main.go b/main.go index 61cb218a74..9bfea13a2f 100644 --- a/main.go +++ b/main.go @@ -244,10 +244,7 @@ func main() { } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = (&otelv1alpha1.OpenTelemetryCollector{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "OpenTelemetryCollector") - os.Exit(1) - } + otelv1alpha1.RegisterCollectorValidatingWebhook(mgr) if err = (&otelv1alpha1.Instrumentation{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ diff --git a/pkg/constants/env.go b/pkg/constants/env.go index 0c4070905c..4665ce00ea 100644 --- a/pkg/constants/env.go +++ b/pkg/constants/env.go @@ -25,4 +25,6 @@ const ( EnvPodName = "OTEL_RESOURCE_ATTRIBUTES_POD_NAME" EnvPodUID = "OTEL_RESOURCE_ATTRIBUTES_POD_UID" EnvNodeName = "OTEL_RESOURCE_ATTRIBUTES_NODE_NAME" + + CollectorConfigSHA = "opentelemetry-operator-config/sha256" ) diff --git a/tests/e2e/smoke-validation/00-assert.yaml b/tests/e2e/smoke-validation/00-assert.yaml new file mode 100644 index 0000000000..8cfa8c3cf0 --- /dev/null +++ b/tests/e2e/smoke-validation/00-assert.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simplest-collector +status: + unavailableReplicas: 1 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: simplest-collector-headless +spec: + ports: + - appProtocol: grpc + name: jaeger-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: simplest-collector +spec: + ports: + - appProtocol: grpc + name: jaeger-grpc + port: 14250 + protocol: TCP + targetPort: 14250 diff --git a/tests/e2e/smoke-validation/00-install.yaml b/tests/e2e/smoke-validation/00-install.yaml new file mode 100644 index 0000000000..20f05434a6 --- /dev/null +++ b/tests/e2e/smoke-validation/00-install.yaml @@ -0,0 +1,25 @@ +# Install a bad config +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + name: simplest +spec: + runValidation: true + config: | + receivers: + jaeger: + protocols: + grpc: + otlp: + protocols: + processors: + + exporters: + logging: + + service: + pipelines: + traces: + receivers: [jaeger,otlp] + processors: [] + exporters: [logging] \ No newline at end of file diff --git a/tests/e2e/smoke-validation/01-assert.yaml b/tests/e2e/smoke-validation/01-assert.yaml new file mode 100644 index 0000000000..17365cdacd --- /dev/null +++ b/tests/e2e/smoke-validation/01-assert.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + 
name: simplest-collector +status: + readyReplicas: 1 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: simplest-collector-headless +spec: + ports: + - appProtocol: grpc + name: jaeger-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - appProtocol: grpc + name: otlp-grpc + port: 4317 + protocol: TCP + targetPort: 4317 + - appProtocol: http + name: otlp-http + port: 4318 + protocol: TCP + targetPort: 4318 + +--- + +apiVersion: v1 +kind: Service +metadata: + name: simplest-collector +spec: + ports: + - appProtocol: grpc + name: jaeger-grpc + port: 14250 + protocol: TCP + targetPort: 14250 + - appProtocol: grpc + name: otlp-grpc + port: 4317 + protocol: TCP + targetPort: 4317 + - appProtocol: http + name: otlp-http + port: 4318 + protocol: TCP + targetPort: 4318 diff --git a/tests/e2e/smoke-validation/01-install.yaml b/tests/e2e/smoke-validation/01-install.yaml new file mode 100644 index 0000000000..eeac3dcef7 --- /dev/null +++ b/tests/e2e/smoke-validation/01-install.yaml @@ -0,0 +1,27 @@ +# Install a good config which will replace the bad +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + name: simplest +spec: + runValidation: true + config: | + receivers: + jaeger: + protocols: + grpc: + otlp: + protocols: + grpc: + http: + processors: + + exporters: + logging: + + service: + pipelines: + traces: + receivers: [jaeger,otlp] + processors: [] + exporters: [logging] \ No newline at end of file From 8f13b973bfd557de7a14b04337a0ed48ea18f70b Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 3 Oct 2023 11:29:43 -0400 Subject: [PATCH 2/6] bundle --- ...opentelemetry-operator.clusterserviceversion.yaml | 2 +- config/rbac/role.yaml | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml index 497fc6ed3c..e938b110e6 100644 --- a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml +++ b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml @@ -31,7 +31,7 @@ metadata: categories: Logging & Tracing,Monitoring certified: "false" containerImage: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator - createdAt: "2023-09-29T16:46:40Z" + createdAt: "2023-10-03T15:28:54Z" description: Provides the OpenTelemetry components, including the Collector operators.operatorframework.io/builder: operator-sdk-v1.29.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 093332c890..1a33e90fab 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -66,6 +66,18 @@ rules: - patch - update - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - coordination.k8s.io resources: From e9e3c9c47b23ffb288e9b306e469f3727ecc948a Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 3 Oct 2023 15:17:29 -0400 Subject: [PATCH 3/6] setup webhook --- .../opentelemetrycollector_webhook.go | 72 ++++++++----------- controllers/suite_test.go | 5 +- main.go | 9 ++- 3 files changed, 42 insertions(+), 44 deletions(-) diff --git a/apis/v1alpha1/opentelemetrycollector_webhook.go b/apis/v1alpha1/opentelemetrycollector_webhook.go index b76f9f3bfc..505067c558 100644 --- a/apis/v1alpha1/opentelemetrycollector_webhook.go +++ b/apis/v1alpha1/opentelemetrycollector_webhook.go @@ -17,11 +17,10 @@ package v1alpha1 import ( 
"context" "fmt" - "net/http" "github.com/go-logr/logr" - admissionv1 "k8s.io/api/admission/v1" autoscalingv2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -39,59 +38,48 @@ const ( // log is for logging in this package. var opentelemetrycollectorlog = logf.Log.WithName("opentelemetrycollector-resource") +var _ admission.CustomValidator = &CollectorValidatingWebhook{} -type collectorValidatingWebhook struct { +type CollectorValidatingWebhook struct { client client.Client decoder *admission.Decoder logger logr.Logger } -func (c collectorValidatingWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { - otelcol := &OpenTelemetryCollector{} - err := c.decoder.DecodeRaw(req.Object, otelcol) - if err != nil { - c.logger.Error(err, "failed to decode message") - return admission.Errored(http.StatusBadRequest, err) - } - c.logger.Info("validating operation", "operation", req.Operation, "name", otelcol.GetName()) - var warnings []string - switch req.Operation { - // passthrough connect requests - case admissionv1.Connect: - case admissionv1.Create: - warnings, err = otelcol.validateCRDSpec() - case admissionv1.Update: - old := &OpenTelemetryCollector{} - err = c.decoder.DecodeRaw(req.OldObject, old) - if err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - warnings, err = otelcol.validateCRDSpec() - case admissionv1.Delete: - // req.OldObject contains the object being deleted - old := &OpenTelemetryCollector{} - err = c.decoder.DecodeRaw(req.OldObject, old) - if err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - default: - return admission.Errored(http.StatusExpectationFailed, fmt.Errorf("unknown operator: %s", req.Operation)) +func (c CollectorValidatingWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { + otelcol, ok := obj.(*OpenTelemetryCollector) + if !ok { + return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) + } + return otelcol.validateCRDSpec() +} + +func (c CollectorValidatingWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) { + otelcol, ok := newObj.(*OpenTelemetryCollector) + if !ok { + return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", newObj) } - if err != nil { - return admission.Denied(err.Error()).WithWarnings(warnings...) + return otelcol.validateCRDSpec() +} + +func (c CollectorValidatingWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { + otelcol, ok := obj.(*OpenTelemetryCollector) + if !ok { + return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) } - return admission.Allowed("").WithWarnings(warnings...) 
+ return otelcol.validateCRDSpec() } -func RegisterCollectorValidatingWebhook(mgr ctrl.Manager) { - cvw := &collectorValidatingWebhook{ - client: mgr.GetClient(), - logger: mgr.GetLogger().WithValues("handler", "collectorValidatingWebhook"), - decoder: admission.NewDecoder(mgr.GetScheme()), +func NewCollectorValidatingWebhook(c client.Client, logger logr.Logger) *CollectorValidatingWebhook { + cvw := &CollectorValidatingWebhook{ + client: c, + logger: logger.WithValues("handler", "CollectorValidatingWebhook"), } - mgr.GetWebhookServer().Register(validatingWebhookPath, &webhook.Admission{Handler: cvw}) + return cvw } +func (c Coll) + func (r *OpenTelemetryCollector) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). For(r). diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 98e2562055..908bce3c34 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -125,7 +125,10 @@ func TestMain(m *testing.M) { os.Exit(1) } - if err = (&v1alpha1.OpenTelemetryCollector{}).SetupWebhookWithManager(mgr); err != nil { + if err = ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.OpenTelemetryCollector{}). + WithValidator(v1alpha1.NewCollectorValidatingWebhook(mgr.GetClient(), logger)). + Complete(); err != nil { fmt.Printf("failed to SetupWebhookWithManager: %v", err) os.Exit(1) } diff --git a/main.go b/main.go index 9bfea13a2f..d9facf5d9e 100644 --- a/main.go +++ b/main.go @@ -244,7 +244,14 @@ func main() { } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - otelv1alpha1.RegisterCollectorValidatingWebhook(mgr) + err := ctrl.NewWebhookManagedBy(mgr). + For(&otelv1alpha1.OpenTelemetryCollector{}). + WithValidator(otelv1alpha1.NewCollectorValidatingWebhook(mgr.GetClient(), logger)). + Complete() + if err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "OpenTelemetryCollector") + os.Exit(1) + } if err = (&otelv1alpha1.Instrumentation{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ From d0b921fceb9193f0d1b8184ad0ea4ff0af940670 Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 3 Oct 2023 16:28:17 -0400 Subject: [PATCH 4/6] new webhook style --- .../opentelemetrycollector_webhook.go | 66 ++--------------- .../opentelemetrycollector_webhook_test.go | 34 ++++++--- apis/v1alpha1/zz_generated.deepcopy.go | 4 +- controllers/suite_test.go | 6 +- internal/collectorwebhook/webhook.go | 70 +++++++++++++++++++ .../webhookhandler_suite_test.go | 3 +- main.go | 7 +- pkg/collector/reconcile/suite_test.go | 3 +- pkg/collector/upgrade/suite_test.go | 3 +- 9 files changed, 110 insertions(+), 86 deletions(-) create mode 100644 internal/collectorwebhook/webhook.go diff --git a/apis/v1alpha1/opentelemetrycollector_webhook.go b/apis/v1alpha1/opentelemetrycollector_webhook.go index 505067c558..c7a91f2f65 100644 --- a/apis/v1alpha1/opentelemetrycollector_webhook.go +++ b/apis/v1alpha1/opentelemetrycollector_webhook.go @@ -15,15 +15,10 @@ package v1alpha1 import ( - "context" "fmt" - "github.com/go-logr/logr" autoscalingv2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -32,59 +27,8 @@ import ( "github.com/open-telemetry/opentelemetry-operator/pkg/featuregate" ) -const ( - validatingWebhookPath = 
"/validate-opentelemetry-io-v1alpha1-opentelemetrycollector" -) - // log is for logging in this package. var opentelemetrycollectorlog = logf.Log.WithName("opentelemetrycollector-resource") -var _ admission.CustomValidator = &CollectorValidatingWebhook{} - -type CollectorValidatingWebhook struct { - client client.Client - decoder *admission.Decoder - logger logr.Logger -} - -func (c CollectorValidatingWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { - otelcol, ok := obj.(*OpenTelemetryCollector) - if !ok { - return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) - } - return otelcol.validateCRDSpec() -} - -func (c CollectorValidatingWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) { - otelcol, ok := newObj.(*OpenTelemetryCollector) - if !ok { - return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", newObj) - } - return otelcol.validateCRDSpec() -} - -func (c CollectorValidatingWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { - otelcol, ok := obj.(*OpenTelemetryCollector) - if !ok { - return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) - } - return otelcol.validateCRDSpec() -} - -func NewCollectorValidatingWebhook(c client.Client, logger logr.Logger) *CollectorValidatingWebhook { - cvw := &CollectorValidatingWebhook{ - client: c, - logger: logger.WithValues("handler", "CollectorValidatingWebhook"), - } - return cvw -} - -func (c Coll) - -func (r *OpenTelemetryCollector) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} // +kubebuilder:webhook:path=/mutate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=true,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,verbs=create;update,versions=v1alpha1,name=mopentelemetrycollector.kb.io,sideEffects=none,admissionReviewVersions=v1 // +kubebuilder:webhook:verbs=create;update,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectorcreateupdate.kb.io,sideEffects=none,admissionReviewVersions=v1 @@ -150,7 +94,7 @@ func (r *OpenTelemetryCollector) Default() { } // validateCrdSpec adheres closely to the admission.Validate spec to allow the collector to validate its CRD spec. 
-func (r *OpenTelemetryCollector) validateCRDSpec() (admission.Warnings, error) { +func (r *OpenTelemetryCollector) ValidateCRDSpec() (admission.Warnings, error) { warnings := admission.Warnings{} // validate volumeClaimTemplates if r.Spec.Mode != ModeStatefulSet && len(r.Spec.VolumeClaimTemplates) > 0 { @@ -207,24 +151,24 @@ func (r *OpenTelemetryCollector) validateCRDSpec() (admission.Warnings, error) { } } - maxReplicas := new(int32) + var maxReplicas *int32 if r.Spec.Autoscaler != nil && r.Spec.Autoscaler.MaxReplicas != nil { maxReplicas = r.Spec.Autoscaler.MaxReplicas } // check deprecated .Spec.MaxReplicas if maxReplicas is not set - if *maxReplicas == 0 && r.Spec.MaxReplicas != nil { + if maxReplicas == nil && r.Spec.MaxReplicas != nil { warnings = append(warnings, "MaxReplicas is deprecated") maxReplicas = r.Spec.MaxReplicas } - minReplicas := new(int32) + var minReplicas *int32 if r.Spec.Autoscaler != nil && r.Spec.Autoscaler.MinReplicas != nil { minReplicas = r.Spec.Autoscaler.MinReplicas } // check deprecated .Spec.MinReplicas if minReplicas is not set - if *minReplicas == 0 { + if minReplicas == nil { if r.Spec.MinReplicas != nil { warnings = append(warnings, "MinReplicas is deprecated") minReplicas = r.Spec.MinReplicas diff --git a/apis/v1alpha1/opentelemetrycollector_webhook_test.go b/apis/v1alpha1/opentelemetrycollector_webhook_test.go index ac06f48a5a..9b5d2adf7a 100644 --- a/apis/v1alpha1/opentelemetrycollector_webhook_test.go +++ b/apis/v1alpha1/opentelemetrycollector_webhook_test.go @@ -336,7 +336,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { MaxReplicas: &zero, }, }, - expectedErr: "maxReplicas should be defined and one or more", + expectedErr: "maxReplicas should be defined and one or more", + expectedWarnings: []string{"MaxReplicas is deprecated"}, }, { name: "invalid replicas, greater than max", @@ -346,7 +347,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { Replicas: &five, }, }, - expectedErr: "replicas must not be greater than maxReplicas", + expectedErr: "replicas must not be greater than maxReplicas", + expectedWarnings: []string{"MaxReplicas is deprecated"}, }, { name: "invalid min replicas, greater than max", @@ -356,7 +358,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { MinReplicas: &five, }, }, - expectedErr: "minReplicas must not be greater than maxReplicas", + expectedErr: "minReplicas must not be greater than maxReplicas", + expectedWarnings: []string{"MaxReplicas is deprecated", "MinReplicas is deprecated"}, }, { name: "invalid min replicas, lesser than 1", @@ -366,7 +369,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { MinReplicas: &zero, }, }, - expectedErr: "minReplicas should be one or more", + expectedErr: "minReplicas should be one or more", + expectedWarnings: []string{"MaxReplicas is deprecated", "MinReplicas is deprecated"}, }, { name: "invalid autoscaler scale down", @@ -382,7 +386,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { }, }, }, - expectedErr: "scaleDown should be one or more", + expectedErr: "scaleDown should be one or more", + expectedWarnings: []string{"MaxReplicas is deprecated"}, }, { name: "invalid autoscaler scale up", @@ -398,7 +403,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { }, }, }, - expectedErr: "scaleUp should be one or more", + expectedErr: "scaleUp should be one or more", + expectedWarnings: []string{"MaxReplicas is deprecated"}, }, { name: "invalid autoscaler target cpu utilization", @@ -410,7 +416,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { }, }, }, - 
expectedErr: "targetCPUUtilization should be greater than 0 and less than 100", + expectedErr: "targetCPUUtilization should be greater than 0 and less than 100", + expectedWarnings: []string{"MaxReplicas is deprecated"}, }, { name: "autoscaler minReplicas is less than maxReplicas", @@ -438,7 +445,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { }, }, }, - expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, metric type unsupported. Expected metric of source type Pod", + expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, metric type unsupported. Expected metric of source type Pod", + expectedWarnings: []string{"MaxReplicas is deprecated"}, }, { name: "invalid pod metric average value", @@ -463,7 +471,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { }, }, }, - expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, average value should be greater than 0", + expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, average value should be greater than 0", + expectedWarnings: []string{"MaxReplicas is deprecated"}, }, { name: "utilization target is not valid with pod metrics", @@ -488,7 +497,8 @@ func TestOTELColValidatingWebhook(t *testing.T) { }, }, }, - expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, invalid pods target type", + expectedErr: "the OpenTelemetry Spec autoscale configuration is incorrect, invalid pods target type", + expectedWarnings: []string{"MaxReplicas is deprecated"}, }, { name: "invalid deployment mode incompabible with ingress settings", @@ -635,13 +645,15 @@ func TestOTELColValidatingWebhook(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - warnings, err := test.otelcol.validateCRDSpec() + warnings, err := test.otelcol.ValidateCRDSpec() if test.expectedErr == "" { assert.NoError(t, err) return } if len(test.expectedWarnings) == 0 { assert.Empty(t, warnings, test.expectedWarnings) + } else { + assert.ElementsMatch(t, warnings, test.expectedWarnings) } assert.ErrorContains(t, err, test.expectedErr) }) diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index d90a2d74dc..b9d883ce70 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -20,8 +20,8 @@ package v1alpha1 import ( - "k8s.io/api/autoscaling/v2" - "k8s.io/api/core/v1" + v2 "k8s.io/api/autoscaling/v2" + v1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 908bce3c34..a0cd232bb6 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -46,6 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" + "github.com/open-telemetry/opentelemetry-operator/internal/collectorwebhook" "github.com/open-telemetry/opentelemetry-operator/internal/config" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" "github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/testdata" @@ -125,10 +126,7 @@ func TestMain(m *testing.M) { os.Exit(1) } - if err = ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.OpenTelemetryCollector{}). - WithValidator(v1alpha1.NewCollectorValidatingWebhook(mgr.GetClient(), logger)). 
- Complete(); err != nil { + if err = collectorwebhook.SetupCollectorValidatingWebhookWithManager(mgr); err != nil { fmt.Printf("failed to SetupWebhookWithManager: %v", err) os.Exit(1) } diff --git a/internal/collectorwebhook/webhook.go b/internal/collectorwebhook/webhook.go new file mode 100644 index 0000000000..0bfd6aa3ce --- /dev/null +++ b/internal/collectorwebhook/webhook.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectorwebhook + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" +) + +var _ admission.CustomValidator = &CollectorValidatingWebhook{} + +type CollectorValidatingWebhook struct { + logger logr.Logger + c client.Client +} + +func (c CollectorValidatingWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { + otelcol, ok := obj.(*v1alpha1.OpenTelemetryCollector) + if !ok { + return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) + } + return otelcol.ValidateCRDSpec() +} + +func (c CollectorValidatingWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) { + otelcol, ok := newObj.(*v1alpha1.OpenTelemetryCollector) + if !ok { + return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", newObj) + } + return otelcol.ValidateCRDSpec() +} + +func (c CollectorValidatingWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { + otelcol, ok := obj.(*v1alpha1.OpenTelemetryCollector) + if !ok { + return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) + } + return otelcol.ValidateCRDSpec() +} + +func SetupCollectorValidatingWebhookWithManager(mgr controllerruntime.Manager) error { + cvw := &CollectorValidatingWebhook{ + c: mgr.GetClient(), + logger: mgr.GetLogger().WithValues("handler", "CollectorValidatingWebhook"), + } + return controllerruntime.NewWebhookManagedBy(mgr). + For(&v1alpha1.OpenTelemetryCollector{}). + WithValidator(cvw). 
+ Complete() +} diff --git a/internal/webhookhandler/webhookhandler_suite_test.go b/internal/webhookhandler/webhookhandler_suite_test.go index 5c188e7ddd..845ef8d0df 100644 --- a/internal/webhookhandler/webhookhandler_suite_test.go +++ b/internal/webhookhandler/webhookhandler_suite_test.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" + "github.com/open-telemetry/opentelemetry-operator/internal/collectorwebhook" // +kubebuilder:scaffold:imports ) @@ -97,7 +98,7 @@ func TestMain(m *testing.M) { os.Exit(1) } - if err = (&v1alpha1.OpenTelemetryCollector{}).SetupWebhookWithManager(mgr); err != nil { + if err = collectorwebhook.SetupCollectorValidatingWebhookWithManager(mgr); err != nil { fmt.Printf("failed to SetupWebhookWithManager: %v", err) os.Exit(1) } diff --git a/main.go b/main.go index d9facf5d9e..12fdb0f4a6 100644 --- a/main.go +++ b/main.go @@ -46,6 +46,7 @@ import ( otelv1alpha1 "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" "github.com/open-telemetry/opentelemetry-operator/controllers" + "github.com/open-telemetry/opentelemetry-operator/internal/collectorwebhook" "github.com/open-telemetry/opentelemetry-operator/internal/config" "github.com/open-telemetry/opentelemetry-operator/internal/version" "github.com/open-telemetry/opentelemetry-operator/internal/webhookhandler" @@ -244,11 +245,7 @@ func main() { } if os.Getenv("ENABLE_WEBHOOKS") != "false" { - err := ctrl.NewWebhookManagedBy(mgr). - For(&otelv1alpha1.OpenTelemetryCollector{}). - WithValidator(otelv1alpha1.NewCollectorValidatingWebhook(mgr.GetClient(), logger)). - Complete() - if err != nil { + if err = collectorwebhook.SetupCollectorValidatingWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "OpenTelemetryCollector") os.Exit(1) } diff --git a/pkg/collector/reconcile/suite_test.go b/pkg/collector/reconcile/suite_test.go index 9feec99458..30752f12b8 100644 --- a/pkg/collector/reconcile/suite_test.go +++ b/pkg/collector/reconcile/suite_test.go @@ -49,6 +49,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" + "github.com/open-telemetry/opentelemetry-operator/internal/collectorwebhook" "github.com/open-telemetry/opentelemetry-operator/internal/config" "github.com/open-telemetry/opentelemetry-operator/internal/manifests" "github.com/open-telemetry/opentelemetry-operator/internal/manifests/collector/testdata" @@ -135,7 +136,7 @@ func TestMain(m *testing.M) { os.Exit(1) } - if err = (&v1alpha1.OpenTelemetryCollector{}).SetupWebhookWithManager(mgr); err != nil { + if err = collectorwebhook.SetupCollectorValidatingWebhookWithManager(mgr); err != nil { fmt.Printf("failed to SetupWebhookWithManager: %v", err) os.Exit(1) } diff --git a/pkg/collector/upgrade/suite_test.go b/pkg/collector/upgrade/suite_test.go index 63c5cf4934..20d337a594 100644 --- a/pkg/collector/upgrade/suite_test.go +++ b/pkg/collector/upgrade/suite_test.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" + "github.com/open-telemetry/opentelemetry-operator/internal/collectorwebhook" // +kubebuilder:scaffold:imports ) @@ -98,7 +99,7 @@ func TestMain(m *testing.M) { os.Exit(1) } - if err = (&v1alpha1.OpenTelemetryCollector{}).SetupWebhookWithManager(mgr); err != nil { + if err = collectorwebhook.SetupCollectorValidatingWebhookWithManager(mgr); err != nil { 
fmt.Printf("failed to SetupWebhookWithManager: %v", err) os.Exit(1) } From 0cb0959c2730eabbb321aabcf9e08757343b971d Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 3 Oct 2023 16:40:11 -0400 Subject: [PATCH 5/6] update webhook to default --- .../opentelemetrycollector_webhook.go | 5 +--- apis/v1alpha1/zz_generated.deepcopy.go | 4 +-- internal/collectorwebhook/webhook.go | 27 ++++++++++++++----- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/apis/v1alpha1/opentelemetrycollector_webhook.go b/apis/v1alpha1/opentelemetrycollector_webhook.go index c7a91f2f65..539d510b10 100644 --- a/apis/v1alpha1/opentelemetrycollector_webhook.go +++ b/apis/v1alpha1/opentelemetrycollector_webhook.go @@ -20,7 +20,6 @@ import ( autoscalingv2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/util/validation" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ta "github.com/open-telemetry/opentelemetry-operator/internal/manifests/targetallocator/adapters" @@ -34,8 +33,6 @@ var opentelemetrycollectorlog = logf.Log.WithName("opentelemetrycollector-resour // +kubebuilder:webhook:verbs=create;update,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=fail,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectorcreateupdate.kb.io,sideEffects=none,admissionReviewVersions=v1 // +kubebuilder:webhook:verbs=delete,path=/validate-opentelemetry-io-v1alpha1-opentelemetrycollector,mutating=false,failurePolicy=ignore,groups=opentelemetry.io,resources=opentelemetrycollectors,versions=v1alpha1,name=vopentelemetrycollectordelete.kb.io,sideEffects=none,admissionReviewVersions=v1 -var _ webhook.Defaulter = &OpenTelemetryCollector{} - // Default implements webhook.Defaulter so a webhook will be registered for the type. func (r *OpenTelemetryCollector) Default() { opentelemetrycollectorlog.Info("default", "name", r.Name) @@ -93,7 +90,7 @@ func (r *OpenTelemetryCollector) Default() { } } -// validateCrdSpec adheres closely to the admission.Validate spec to allow the collector to validate its CRD spec. +// ValidateCRDSpec adheres closely to the admission.Validate spec to allow the collector to validate its CRD spec. 
func (r *OpenTelemetryCollector) ValidateCRDSpec() (admission.Warnings, error) { warnings := admission.Warnings{} // validate volumeClaimTemplates diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index b9d883ce70..d90a2d74dc 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -20,8 +20,8 @@ package v1alpha1 import ( - v2 "k8s.io/api/autoscaling/v2" - v1 "k8s.io/api/core/v1" + "k8s.io/api/autoscaling/v2" + "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" diff --git a/internal/collectorwebhook/webhook.go b/internal/collectorwebhook/webhook.go index 0bfd6aa3ce..bb5347e809 100644 --- a/internal/collectorwebhook/webhook.go +++ b/internal/collectorwebhook/webhook.go @@ -27,14 +27,26 @@ import ( "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" ) -var _ admission.CustomValidator = &CollectorValidatingWebhook{} +var ( + _ admission.CustomValidator = &Webhook{} + _ admission.CustomDefaulter = &Webhook{} +) -type CollectorValidatingWebhook struct { +type Webhook struct { logger logr.Logger c client.Client } -func (c CollectorValidatingWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { +func (c Webhook) Default(ctx context.Context, obj runtime.Object) error { + otelcol, ok := obj.(*v1alpha1.OpenTelemetryCollector) + if !ok { + return fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) + } + otelcol.Default() + return nil +} + +func (c Webhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { otelcol, ok := obj.(*v1alpha1.OpenTelemetryCollector) if !ok { return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) @@ -42,7 +54,7 @@ func (c CollectorValidatingWebhook) ValidateCreate(ctx context.Context, obj runt return otelcol.ValidateCRDSpec() } -func (c CollectorValidatingWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) { +func (c Webhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) { otelcol, ok := newObj.(*v1alpha1.OpenTelemetryCollector) if !ok { return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", newObj) @@ -50,7 +62,7 @@ func (c CollectorValidatingWebhook) ValidateUpdate(ctx context.Context, oldObj, return otelcol.ValidateCRDSpec() } -func (c CollectorValidatingWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { +func (c Webhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { otelcol, ok := obj.(*v1alpha1.OpenTelemetryCollector) if !ok { return nil, fmt.Errorf("expected an OpenTelemetryCollector, received %T", obj) @@ -59,12 +71,13 @@ func (c CollectorValidatingWebhook) ValidateDelete(ctx context.Context, obj runt } func SetupCollectorValidatingWebhookWithManager(mgr controllerruntime.Manager) error { - cvw := &CollectorValidatingWebhook{ + cvw := &Webhook{ c: mgr.GetClient(), - logger: mgr.GetLogger().WithValues("handler", "CollectorValidatingWebhook"), + logger: mgr.GetLogger().WithValues("handler", "Webhook"), } return controllerruntime.NewWebhookManagedBy(mgr). For(&v1alpha1.OpenTelemetryCollector{}). WithValidator(cvw). + WithDefaulter(cvw). 
Complete() } From 149b2ffa95a6429f7793586ab7ea23bc9e0ca890 Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 3 Oct 2023 16:53:16 -0400 Subject: [PATCH 6/6] working webhook refactor --- internal/config/main_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/config/main_test.go b/internal/config/main_test.go index 7bda3f64cd..db7117666d 100644 --- a/internal/config/main_test.go +++ b/internal/config/main_test.go @@ -80,7 +80,7 @@ func TestAutoDetectInBackground(t *testing.T) { } cfg := config.New( config.WithAutoDetect(mock), - config.WithAutoDetectFrequency(500*time.Second), + config.WithAutoDetectFrequency(5*time.Second), ) // sanity check
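
The end state of this series is a single internal/collectorwebhook.Webhook type that satisfies both admission.CustomValidator and admission.CustomDefaulter and is registered once through SetupCollectorValidatingWebhookWithManager, so controller-runtime owns request decoding and warning plumbing instead of the hand-rolled admissionv1 switch from the first commit. Create, update, and delete all funnel into the now-exported ValidateCRDSpec, which is why deprecation notices such as "MaxReplicas is deprecated" surface as admission warnings in the test table above. The sketch below is illustrative only and not part of the patches: the test name and the choice of corev1.Pod as a deliberately wrong input are assumptions, while Webhook, the ValidateCreate signature, and the type-mismatch error come from the diffs themselves.

package collectorwebhook_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"

	"github.com/open-telemetry/opentelemetry-operator/internal/collectorwebhook"
)

// Sketch: the CustomValidator introduced in this series rejects any object
// that is not an OpenTelemetryCollector before it looks at the spec.
func TestValidateCreateRejectsUnexpectedType(t *testing.T) {
	// The zero value is usable here because ValidateCreate performs the type
	// assertion before it would need the client or logger fields.
	w := collectorwebhook.Webhook{}

	// corev1.Pod implements runtime.Object but is not a collector, so the
	// type assertion fails and ValidateCreate returns an error.
	if _, err := w.ValidateCreate(context.Background(), &corev1.Pod{}); err == nil {
		t.Fatal("expected an error for a non-collector object")
	}
}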