diff --git a/.chloggen/enabling-scrape-probe-configs.yaml b/.chloggen/enabling-scrape-probe-configs.yaml
new file mode 100755
index 0000000000..b68325d698
--- /dev/null
+++ b/.chloggen/enabling-scrape-probe-configs.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: 'enhancement'
+
+# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
+component: target allocator
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add support for the Probe and ScrapeConfig Prometheus CRs in the target allocator
+
+# One or more tracking issues related to the change
+issues: [1842]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
diff --git a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml
index fbdafffa74..4b88aae63e 100644
--- a/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/opentelemetry-operator.clusterserviceversion.yaml
@@ -99,7 +99,7 @@ metadata:
     categories: Logging & Tracing,Monitoring
     certified: "false"
     containerImage: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator
-    createdAt: "2024-07-10T09:05:22Z"
+    createdAt: "2024-07-18T10:04:15Z"
     description: Provides the OpenTelemetry components, including the Collector
     operators.operatorframework.io/builder: operator-sdk-v1.29.0
     operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
@@ -353,6 +353,8 @@ spec:
           - monitoring.coreos.com
           resources:
           - podmonitors
+          - probes
+          - scrapeconfigs
           - servicemonitors
          verbs:
           - create
diff --git a/cmd/otel-allocator/config/config.go b/cmd/otel-allocator/config/config.go
index 3e3fd389c7..1a502ba103 100644
--- a/cmd/otel-allocator/config/config.go
+++ b/cmd/otel-allocator/config/config.go
@@ -61,9 +61,13 @@ type Config struct {
 type PrometheusCRConfig struct {
 	Enabled                         bool                  `yaml:"enabled,omitempty"`
 	PodMonitorSelector              *metav1.LabelSelector `yaml:"pod_monitor_selector,omitempty"`
+	ScrapeConfigSelector            *metav1.LabelSelector `yaml:"scrape_config_selector,omitempty"`
 	ServiceMonitorSelector          *metav1.LabelSelector `yaml:"service_monitor_selector,omitempty"`
+	ProbeSelector                   *metav1.LabelSelector `yaml:"probe_selector,omitempty"`
 	ServiceMonitorNamespaceSelector *metav1.LabelSelector `yaml:"service_monitor_namespace_selector,omitempty"`
 	PodMonitorNamespaceSelector     *metav1.LabelSelector `yaml:"pod_monitor_namespace_selector,omitempty"`
+	ProbeNamespaceSelector          *metav1.LabelSelector `yaml:"probe_namespace_selector,omitempty"`
+	ScrapeConfigNamespaceSelector   *metav1.LabelSelector `yaml:"scrape_config_namespace_selector,omitempty"`
 	ScrapeInterval                  model.Duration        `yaml:"scrape_interval,omitempty"`
 }
diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go
index 790c6c36cf..713747923c 100644
--- a/cmd/otel-allocator/watcher/promOperator.go
+++ b/cmd/otel-allocator/watcher/promOperator.go
@@ -75,8 +75,12 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat
 				ScrapeInterval:                  monitoringv1.Duration(cfg.PrometheusCR.ScrapeInterval.String()),
 				ServiceMonitorSelector:          cfg.PrometheusCR.ServiceMonitorSelector,
 				PodMonitorSelector:              cfg.PrometheusCR.PodMonitorSelector,
+				ProbeSelector:                   cfg.PrometheusCR.ProbeSelector,
+				ScrapeConfigSelector:            cfg.PrometheusCR.ScrapeConfigSelector,
 				ServiceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
 				PodMonitorNamespaceSelector:     cfg.PrometheusCR.PodMonitorNamespaceSelector,
+				ProbeNamespaceSelector:          cfg.PrometheusCR.ProbeNamespaceSelector,
+				ScrapeConfigNamespaceSelector:   cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
 			},
 		},
 	}
@@ -114,6 +118,8 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat
 		kubeConfigPath:                  cfg.KubeConfigFilePath,
 		podMonitorNamespaceSelector:     cfg.PrometheusCR.PodMonitorNamespaceSelector,
 		serviceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
+		probeNamespaceSelector:          cfg.PrometheusCR.ProbeNamespaceSelector,
+		scrapeConfigNamespaceSelector:   cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
 		resourceSelector:                resourceSelector,
 		store:                           store,
 	}, nil
@@ -131,6 +137,8 @@ type PrometheusCRWatcher struct {
 	kubeConfigPath                  string
 	podMonitorNamespaceSelector     *metav1.LabelSelector
 	serviceMonitorNamespaceSelector *metav1.LabelSelector
+	probeNamespaceSelector          *metav1.LabelSelector
+	scrapeConfigNamespaceSelector   *metav1.LabelSelector
 	resourceSelector                *prometheus.ResourceSelector
 	store                           *assets.StoreBuilder
 }
@@ -173,9 +181,21 @@ func getInformers(factory informers.FactoriesForNamespaces) (map[string]*informe
 		return nil, err
 	}
 
+	probeInformers, err := informers.NewInformersForResource(factory, monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.ProbeName))
+	if err != nil {
+		return nil, err
+	}
+
+	scrapeConfigInformers, err := informers.NewInformersForResource(factory, promv1alpha1.SchemeGroupVersion.WithResource(promv1alpha1.ScrapeConfigName))
+	if err != nil {
+		return nil, err
+	}
+
 	return map[string]*informers.ForResource{
 		monitoringv1.ServiceMonitorName: serviceMonitorInformers,
 		monitoringv1.PodMonitorName:     podMonitorInformers,
+		monitoringv1.ProbeName:          probeInformers,
+		promv1alpha1.ScrapeConfigName:   scrapeConfigInformers,
 	}, nil
 }
@@ -205,6 +225,8 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch
 			for name, selector := range map[string]*metav1.LabelSelector{
 				"PodMonitorNamespaceSelector":     w.podMonitorNamespaceSelector,
 				"ServiceMonitorNamespaceSelector": w.serviceMonitorNamespaceSelector,
+				"ProbeNamespaceSelector":          w.probeNamespaceSelector,
+				"ScrapeConfigNamespaceSelector":   w.scrapeConfigNamespaceSelector,
 			} {
 				sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, selector)
 				if err != nil {
@@ -319,6 +341,16 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi
 		return nil, err
 	}
 
+	probeInstances, err := w.resourceSelector.SelectProbes(ctx, w.informers[monitoringv1.ProbeName].ListAllByNamespace)
+	if err != nil {
+		return nil, err
+	}
+
+	scrapeConfigInstances, err := w.resourceSelector.SelectScrapeConfigs(ctx, w.informers[promv1alpha1.ScrapeConfigName].ListAllByNamespace)
+	if err != nil {
+		return nil, err
+	}
+
 	generatedConfig, err := w.configGenerator.GenerateServerConfiguration(
 		"30s",
 		"",
@@ -329,8 +361,8 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi
 		nil,
 		serviceMonitorInstances,
 		podMonitorInstances,
-		map[string]*monitoringv1.Probe{},
-		map[string]*promv1alpha1.ScrapeConfig{},
+		probeInstances,
+		scrapeConfigInstances,
 		w.store,
 		nil,
 		nil,
diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go
index 180d34acd3..a93a8c2ad6 100644
--- a/cmd/otel-allocator/watcher/promOperator_test.go
+++ b/cmd/otel-allocator/watcher/promOperator_test.go
@@ -23,6 +23,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
 	"github.com/prometheus-operator/prometheus-operator/pkg/assets"
 	fakemonitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
 	"github.com/prometheus-operator/prometheus-operator/pkg/informers"
@@ -34,6 +35,7 @@ import (
 	promconfig "github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
 	kubeDiscovery "github.com/prometheus/prometheus/discovery/kubernetes"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
@@ -58,6 +60,8 @@ func TestLoadConfig(t *testing.T) {
 		name            string
 		serviceMonitors []*monitoringv1.ServiceMonitor
 		podMonitors     []*monitoringv1.PodMonitor
+		scrapeConfigs   []*promv1alpha1.ScrapeConfig
+		probes          []*monitoringv1.Probe
 		want            *promconfig.Config
 		wantErr         bool
 		cfg             allocatorconfig.Config
@@ -661,6 +665,72 @@ func TestLoadConfig(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "probe selector test",
+			probes: []*monitoringv1.Probe{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "probe-test-1",
+						Namespace: "test",
+						Labels: map[string]string{
+							"testpod": "testpod",
+						},
+					},
+					Spec: monitoringv1.ProbeSpec{
+						JobName: "probe/test/probe-1/0",
+						ProberSpec: monitoringv1.ProberSpec{
+							URL:  "localhost:50671",
+							Path: "/metrics",
+						},
+						Targets: monitoringv1.ProbeTargets{
+							StaticConfig: &monitoringv1.ProbeTargetStaticConfig{
+								Targets: []string{"prometheus.io"},
+							},
+						},
+					},
+				},
+			},
+			cfg: allocatorconfig.Config{
+				PrometheusCR: allocatorconfig.PrometheusCRConfig{
+					ProbeSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"testpod": "testpod",
+						},
+					},
+				},
+			},
+			want: &promconfig.Config{
+				ScrapeConfigs: []*promconfig.ScrapeConfig{
+					{
+						JobName:         "probe/test/probe-test-1",
+						ScrapeInterval:  model.Duration(30 * time.Second),
+						ScrapeProtocols: defaultScrapeProtocols,
+						ScrapeTimeout:   model.Duration(10 * time.Second),
+						HonorTimestamps: true,
+						HonorLabels:     false,
+						Scheme:          "http",
+						MetricsPath:     "/metrics",
+						ServiceDiscoveryConfigs: []discovery.Config{
+							discovery.StaticConfig{
+								&targetgroup.Group{
+									Targets: []model.LabelSet{
+										map[model.LabelName]model.LabelValue{
+											"__address__": "prometheus.io",
+										},
+									},
+									Labels: map[model.LabelName]model.LabelValue{
+										"namespace": "test",
+									},
+									Source: "0",
+								},
+							},
+						},
+						HTTPClientConfig:  config.DefaultHTTPClientConfig,
+						EnableCompression: true,
+					},
+				},
+			},
+		},
 		{
 			name: "service monitor namespace selector test",
 			serviceMonitors: []*monitoringv1.ServiceMonitor{
@@ -804,7 +874,7 @@
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			w, _ := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.cfg)
+			w, _ := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.probes, tt.scrapeConfigs, tt.cfg)
 
 			// Start namespace informers in order to populate cache.
 			go w.nsInformer.Run(w.stopChannel)
@@ -909,7 +979,7 @@ func TestNamespaceLabelUpdate(t *testing.T) {
 		ScrapeConfigs: []*promconfig.ScrapeConfig{},
 	}
 
-	w, source := getTestPrometheusCRWatcher(t, nil, podMonitors, cfg)
+	w, source := getTestPrometheusCRWatcher(t, nil, podMonitors, nil, nil, cfg)
 
 	events := make(chan Event, 1)
 	eventInterval := 5 * time.Millisecond
@@ -975,7 +1045,7 @@ func TestRateLimit(t *testing.T) {
 	eventInterval := 5 * time.Millisecond
 	cfg := allocatorconfig.Config{}
 
-	w, _ := getTestPrometheusCRWatcher(t, nil, nil, cfg)
+	w, _ := getTestPrometheusCRWatcher(t, nil, nil, nil, nil, cfg)
 	defer w.Close()
 	w.eventInterval = eventInterval
@@ -1038,7 +1108,7 @@
 // getTestPrometheusCRWatcher creates a test instance of PrometheusCRWatcher with fake clients
 // and test secrets.
-func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, cfg allocatorconfig.Config) (*PrometheusCRWatcher, *fcache.FakeControllerSource) {
+func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, probes []*monitoringv1.Probe, scrapeConfigs []*promv1alpha1.ScrapeConfig, cfg allocatorconfig.Config) (*PrometheusCRWatcher, *fcache.FakeControllerSource) {
 	mClient := fakemonitoringclient.NewSimpleClientset()
 	for _, sm := range svcMonitors {
 		if sm != nil {
@@ -1057,6 +1127,24 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
 		}
 	}
 
+	for _, prb := range probes {
+		if prb != nil {
+			_, err := mClient.MonitoringV1().Probes(prb.Namespace).Create(context.Background(), prb, metav1.CreateOptions{})
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+
+	for _, scc := range scrapeConfigs {
+		if scc != nil {
+			_, err := mClient.MonitoringV1alpha1().ScrapeConfigs(scc.Namespace).Create(context.Background(), scc, metav1.CreateOptions{})
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+
 	k8sClient := fake.NewSimpleClientset()
 	_, err := k8sClient.CoreV1().Secrets("test").Create(context.Background(), &v1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
@@ -1091,8 +1179,12 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
 				ScrapeInterval:                  monitoringv1.Duration("30s"),
 				ServiceMonitorSelector:          cfg.PrometheusCR.ServiceMonitorSelector,
 				PodMonitorSelector:              cfg.PrometheusCR.PodMonitorSelector,
+				ProbeSelector:                   cfg.PrometheusCR.ProbeSelector,
+				ScrapeConfigSelector:            cfg.PrometheusCR.ScrapeConfigSelector,
 				ServiceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
 				PodMonitorNamespaceSelector:     cfg.PrometheusCR.PodMonitorNamespaceSelector,
+				ProbeNamespaceSelector:          cfg.PrometheusCR.ProbeNamespaceSelector,
+				ScrapeConfigNamespaceSelector:   cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
 			},
 		},
 	}
@@ -1132,6 +1224,8 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
 		configGenerator:                 generator,
 		podMonitorNamespaceSelector:     cfg.PrometheusCR.PodMonitorNamespaceSelector,
 		serviceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
+		probeNamespaceSelector:          cfg.PrometheusCR.ProbeNamespaceSelector,
+		scrapeConfigNamespaceSelector:   cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
 		resourceSelector:                resourceSelector,
 		store:                           store,
 	}, source
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 7f896580e3..4db261daca 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -99,6 +99,8 @@ rules:
   - monitoring.coreos.com
   resources:
   - podmonitors
+  - probes
+  - scrapeconfigs
   - servicemonitors
   verbs:
   - create
diff --git a/controllers/opentelemetrycollector_controller.go b/controllers/opentelemetrycollector_controller.go
index ea6f8908d6..8fb9648a8b 100644
--- a/controllers/opentelemetrycollector_controller.go
+++ b/controllers/opentelemetrycollector_controller.go
@@ -23,6 +23,7 @@ import (
 	"github.com/go-logr/logr"
 	routev1 "github.com/openshift/api/route/v1"
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
 	appsv1 "k8s.io/api/apps/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2"
 	corev1 "k8s.io/api/core/v1"
@@ -90,6 +91,8 @@ func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Cont
 		ownedObjectTypes = append(ownedObjectTypes,
 			&monitoringv1.ServiceMonitor{},
 			&monitoringv1.PodMonitor{},
+			&monitoringv1.Probe{},
+			&promv1alpha1.ScrapeConfig{},
 		)
 	}
 	if params.Config.OpenShiftRoutesAvailability() == openshift.RoutesAvailable {
@@ -205,7 +208,7 @@ func NewReconciler(p Params) *OpenTelemetryCollectorReconciler {
 // +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
-// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;podmonitors,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;podmonitors;scrapeconfigs;probes,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=route.openshift.io,resources=routes;routes/custom-host,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures;infrastructures/status,verbs=get;list;watch
@@ -311,6 +314,8 @@ func (r *OpenTelemetryCollectorReconciler) SetupWithManager(mgr ctrl.Manager) er
 	if featuregate.PrometheusOperatorIsAvailable.IsEnabled() && r.config.PrometheusCRAvailability() == prometheus.Available {
 		builder.Owns(&monitoringv1.ServiceMonitor{})
 		builder.Owns(&monitoringv1.PodMonitor{})
+		builder.Owns(&monitoringv1.Probe{})
+		builder.Owns(&promv1alpha1.ScrapeConfig{})
 	}
 	if r.config.OpenShiftRoutesAvailability() == openshift.RoutesAvailable {
 		builder.Owns(&routev1.Route{})
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index 219043c4a8..947a1d9ac1 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -27,6 +27,7 @@ import (
 
 	routev1 "github.com/openshift/api/route/v1"
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
@@ -133,6 +134,7 @@ func TestMain(m *testing.M) {
 	}
 
 	utilruntime.Must(monitoringv1.AddToScheme(testScheme))
+	utilruntime.Must(promv1alpha1.AddToScheme(testScheme))
 	utilruntime.Must(networkingv1.AddToScheme(testScheme))
 	utilruntime.Must(routev1.AddToScheme(testScheme))
 	utilruntime.Must(v1alpha1.AddToScheme(testScheme))
@@ -140,7 +142,7 @@
 	testEnv = &envtest.Environment{
 		CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
-		CRDs:              []*apiextensionsv1.CustomResourceDefinition{testdata.OpenShiftRouteCRD, testdata.ServiceMonitorCRD, testdata.PodMonitorCRD},
+		CRDs:              []*apiextensionsv1.CustomResourceDefinition{testdata.OpenShiftRouteCRD, testdata.ServiceMonitorCRD, testdata.PodMonitorCRD, testdata.ProbeCRD, testdata.ScrapeConfigCRD},
 		WebhookInstallOptions: envtest.WebhookInstallOptions{
 			Paths: []string{filepath.Join("..", "config", "webhook")},
 		},
diff --git a/internal/manifests/collector/testdata/probe_crd.go b/internal/manifests/collector/testdata/probe_crd.go
new file mode 100644
index 0000000000..d4aab97f79
--- /dev/null
+++ b/internal/manifests/collector/testdata/probe_crd.go
@@ -0,0 +1,38 @@
+package testdata
+
+import (
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ProbeCRD is the Probe CRD as a Go structure, for use in envtest.
+var ProbeCRD = &apiextensionsv1.CustomResourceDefinition{
+	ObjectMeta: metav1.ObjectMeta{
+		Name: "probes.monitoring.coreos.com",
+	},
+	Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+		Group: "monitoring.coreos.com",
+		Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+			{
+				Name:    "v1",
+				Served:  true,
+				Storage: true,
+				Schema: &apiextensionsv1.CustomResourceValidation{
+					OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
+						Type:                   "object",
+						XPreserveUnknownFields: func(v bool) *bool { return &v }(true),
+					},
+				},
+				Subresources: &apiextensionsv1.CustomResourceSubresources{
+					Status: &apiextensionsv1.CustomResourceSubresourceStatus{},
+				},
+			},
+		},
+		Scope: apiextensionsv1.NamespaceScoped,
+		Names: apiextensionsv1.CustomResourceDefinitionNames{
+			Plural:   "probes",
+			Singular: "probe",
+			Kind:     "Probe",
+		},
+	},
+}
diff --git a/internal/manifests/collector/testdata/scs_crd.go b/internal/manifests/collector/testdata/scs_crd.go
new file mode 100644
index 0000000000..4879ba3bb6
--- /dev/null
+++ b/internal/manifests/collector/testdata/scs_crd.go
@@ -0,0 +1,38 @@
+package testdata
+
+import (
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ScrapeConfigCRD is the ScrapeConfig CRD as a Go structure, for use in envtest.
+var ScrapeConfigCRD = &apiextensionsv1.CustomResourceDefinition{
+	ObjectMeta: metav1.ObjectMeta{
+		Name: "scrapeconfigs.monitoring.coreos.com",
+	},
+	Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+		Group: "monitoring.coreos.com",
+		Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+			{
+				Name:    "v1alpha1",
+				Served:  true,
+				Storage: true,
+				Schema: &apiextensionsv1.CustomResourceValidation{
+					OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
+						Type:                   "object",
+						XPreserveUnknownFields: func(v bool) *bool { return &v }(true),
+					},
+				},
+				Subresources: &apiextensionsv1.CustomResourceSubresources{
+					Status: &apiextensionsv1.CustomResourceSubresourceStatus{},
+				},
+			},
+		},
+		Scope: apiextensionsv1.NamespaceScoped,
+		Names: apiextensionsv1.CustomResourceDefinitionNames{
+			Plural:   "scrapeconfigs",
+			Singular: "scrapeconfig",
+			Kind:     "ScrapeConfig",
+		},
+	},
+}
diff --git a/main.go b/main.go
index 520e1f1f5a..213d678b51 100644
--- a/main.go
+++ b/main.go
@@ -27,6 +27,7 @@ import (
 
 	routev1 "github.com/openshift/api/route/v1"
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
 	"github.com/spf13/pflag"
 	colfeaturegate "go.opentelemetry.io/collector/featuregate"
 	"go.uber.org/zap/zapcore"
@@ -320,6 +321,7 @@ func main() {
 	if cfg.PrometheusCRAvailability() == prometheus.Available {
 		setupLog.Info("Prometheus CRDs are installed, adding to scheme.")
 		utilruntime.Must(monitoringv1.AddToScheme(scheme))
+		utilruntime.Must(promv1alpha1.AddToScheme(scheme))
 	} else {
 		setupLog.Info("Prometheus CRDs are not installed, skipping adding to scheme.")
 	}
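
Usage note (not part of the patch): with this change, a standalone target allocator configuration can opt into Probe and ScrapeConfig discovery through the new snake_case selector keys added to PrometheusCRConfig. A minimal sketch follows. The enclosing prometheus_cr key, the label values, and the lowercase matchlabels spelling (a plausible artifact of the YAML decoder for this config path, since metav1.LabelSelector carries only JSON tags) are assumptions to verify against your build:

prometheus_cr:
  enabled: true
  probe_selector:                        # illustrative labels, not from this patch
    matchlabels:
      app: blackbox
  probe_namespace_selector: {}           # empty selector: match all namespaces
  scrape_config_selector:
    matchlabels:
      app: scrape-configs
  scrape_config_namespace_selector: {}
  scrape_interval: 30s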
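
For reference, the "probe selector test" fixture above corresponds to a manifest like the following (same values as the Go struct; field names follow the prometheus-operator v1 Probe API):

apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
  name: probe-test-1
  namespace: test
  labels:
    testpod: testpod
spec:
  jobName: probe/test/probe-1/0
  prober:
    url: localhost:50671
    path: /metrics
  targets:
    staticConfig:
      static:
        - prometheus.io

As the expected config in that test shows, the generated scrape job is named after the Probe resource (probe/test/probe-test-1), carries a namespace label taken from the Probe's namespace, and emits one static target group per Probe.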
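
ScrapeConfig has no fixture in this patch; a minimal resource that the new scrape_config_selector could match might look like the following sketch (name, namespace, and labels are hypothetical; the staticConfigs/targets fields follow the prometheus-operator v1alpha1 ScrapeConfig API):

apiVersion: monitoring.coreos.com/v1alpha1
kind: ScrapeConfig
metadata:
  name: static-targets
  namespace: test
  labels:
    app: scrape-configs
spec:
  staticConfigs:
    - targets:
        - prometheus.io:9090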