Enabling scrapeConfig and Probe #3103

Open · wants to merge 4 commits into base: main
16 changes: 16 additions & 0 deletions .chloggen/enabling-scrape-probe-configs.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: 'enhancement'

# The name of the component, or a single word describing the area of concern (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: target allocator

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Enable selection of Probe and ScrapeConfig custom resources in the target allocator

# One or more tracking issues related to the change
issues: [1842]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
@@ -99,7 +99,7 @@ metadata:
categories: Logging & Tracing,Monitoring
certified: "false"
containerImage: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator
createdAt: "2024-07-10T09:05:22Z"
createdAt: "2024-07-18T10:04:15Z"
description: Provides the OpenTelemetry components, including the Collector
operators.operatorframework.io/builder: operator-sdk-v1.29.0
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
@@ -353,6 +353,8 @@ spec:
- monitoring.coreos.com
resources:
- podmonitors
- probes
- scrapeconfigs
- servicemonitors
verbs:
- create
4 changes: 4 additions & 0 deletions cmd/otel-allocator/config/config.go
@@ -61,9 +61,13 @@ type Config struct {
type PrometheusCRConfig struct {
Enabled bool `yaml:"enabled,omitempty"`
PodMonitorSelector *metav1.LabelSelector `yaml:"pod_monitor_selector,omitempty"`
ScrapeConfigSelector *metav1.LabelSelector `yaml:"scrape_config_selector,omitempty"`
ServiceMonitorSelector *metav1.LabelSelector `yaml:"service_monitor_selector,omitempty"`
ProbeSelector *metav1.LabelSelector `yaml:"probe_selector,omitempty"`
ServiceMonitorNamespaceSelector *metav1.LabelSelector `yaml:"service_monitor_namespace_selector,omitempty"`
PodMonitorNamespaceSelector *metav1.LabelSelector `yaml:"pod_monitor_namespace_selector,omitempty"`
ProbeNamespaceSelector *metav1.LabelSelector `yaml:"probe_namespace_selector,omitempty"`
ScrapeConfigNamespaceSelector *metav1.LabelSelector `yaml:"scrape_config_namespace_selector,omitempty"`
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
}
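
For illustration only, the new selector fields could be enabled in a target allocator configuration roughly as sketched below. The keys mirror the yaml tags declared above; the top-level prometheus_cr key is assumed from the surrounding Config struct, and empty selectors ({}) are used here since an empty label selector conventionally matches all objects:

prometheus_cr:
  enabled: true
  probe_selector: {}
  scrape_config_selector: {}
  probe_namespace_selector: {}
  scrape_config_namespace_selector: {}

More specific label selectors can of course be supplied instead of the catch-all {}.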

36 changes: 34 additions & 2 deletions cmd/otel-allocator/watcher/promOperator.go
@@ -75,8 +75,12 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat
ScrapeInterval: monitoringv1.Duration(cfg.PrometheusCR.ScrapeInterval.String()),
ServiceMonitorSelector: cfg.PrometheusCR.ServiceMonitorSelector,
PodMonitorSelector: cfg.PrometheusCR.PodMonitorSelector,
ProbeSelector: cfg.PrometheusCR.ProbeSelector,
ScrapeConfigSelector: cfg.PrometheusCR.ScrapeConfigSelector,
ServiceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
PodMonitorNamespaceSelector: cfg.PrometheusCR.PodMonitorNamespaceSelector,
ProbeNamespaceSelector: cfg.PrometheusCR.ProbeNamespaceSelector,
ScrapeConfigNamespaceSelector: cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
},
},
}
@@ -114,6 +118,8 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat
kubeConfigPath: cfg.KubeConfigFilePath,
podMonitorNamespaceSelector: cfg.PrometheusCR.PodMonitorNamespaceSelector,
serviceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
probeNamespaceSelector: cfg.PrometheusCR.ProbeNamespaceSelector,
scrapeConfigNamespaceSelector: cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
resourceSelector: resourceSelector,
store: store,
}, nil
@@ -131,6 +137,8 @@ type PrometheusCRWatcher struct {
kubeConfigPath string
podMonitorNamespaceSelector *metav1.LabelSelector
serviceMonitorNamespaceSelector *metav1.LabelSelector
probeNamespaceSelector *metav1.LabelSelector
scrapeConfigNamespaceSelector *metav1.LabelSelector
resourceSelector *prometheus.ResourceSelector
store *assets.StoreBuilder
}
@@ -173,9 +181,21 @@ func getInformers(factory informers.FactoriesForNamespaces) (map[string]*informe
return nil, err
}

probeInformers, err := informers.NewInformersForResource(factory, monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.ProbeName))
if err != nil {
return nil, err
}

scrapeConfigInformers, err := informers.NewInformersForResource(factory, promv1alpha1.SchemeGroupVersion.WithResource(promv1alpha1.ScrapeConfigName))
if err != nil {
return nil, err
}

return map[string]*informers.ForResource{
monitoringv1.ServiceMonitorName: serviceMonitorInformers,
monitoringv1.PodMonitorName: podMonitorInformers,
monitoringv1.ProbeName: probeInformers,
promv1alpha1.ScrapeConfigName: scrapeConfigInformers,
}, nil
}

@@ -205,6 +225,8 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch
for name, selector := range map[string]*metav1.LabelSelector{
"PodMonitorNamespaceSelector": w.podMonitorNamespaceSelector,
"ServiceMonitorNamespaceSelector": w.serviceMonitorNamespaceSelector,
"ProbeNamespaceSelector": w.probeNamespaceSelector,
"ScrapeConfigNamespaceSelector": w.scrapeConfigNamespaceSelector,
} {
sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, selector)
if err != nil {
@@ -319,6 +341,16 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi
return nil, err
}

probeInstances, err := w.resourceSelector.SelectProbes(ctx, w.informers[monitoringv1.ProbeName].ListAllByNamespace)
if err != nil {
return nil, err
}

scrapeConfigInstances, err := w.resourceSelector.SelectScrapeConfigs(ctx, w.informers[promv1alpha1.ScrapeConfigName].ListAllByNamespace)
if err != nil {
return nil, err
}

generatedConfig, err := w.configGenerator.GenerateServerConfiguration(
"30s",
"",
Expand All @@ -329,8 +361,8 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi
nil,
serviceMonitorInstances,
podMonitorInstances,
map[string]*monitoringv1.Probe{},
map[string]*promv1alpha1.ScrapeConfig{},
probeInstances,
scrapeConfigInstances,
w.store,
nil,
nil,
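With the change above, LoadConfig passes the selected Probe and ScrapeConfig instances to GenerateServerConfiguration instead of empty maps. As an illustration, a Probe resource along the lines of the test fixture further down (names, labels, and the prober address come from that fixture and are not prescriptive) would be selected once its labels match the configured probe_selector:

apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
  name: probe-test-1
  namespace: test
  labels:
    testpod: testpod
spec:
  jobName: probe/test/probe-1/0
  prober:
    url: localhost:50671
    path: /metrics
  targets:
    staticConfig:
      static:
        - prometheus.io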
102 changes: 98 additions & 4 deletions cmd/otel-allocator/watcher/promOperator_test.go
@@ -23,6 +23,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
"github.com/prometheus-operator/prometheus-operator/pkg/assets"
fakemonitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
"github.com/prometheus-operator/prometheus-operator/pkg/informers"
@@ -34,6 +35,7 @@ import (
promconfig "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
kubeDiscovery "github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
@@ -58,6 +60,8 @@ func TestLoadConfig(t *testing.T) {
name string
serviceMonitors []*monitoringv1.ServiceMonitor
podMonitors []*monitoringv1.PodMonitor
scrapeConfigs []*promv1alpha1.ScrapeConfig
probes []*monitoringv1.Probe
want *promconfig.Config
wantErr bool
cfg allocatorconfig.Config
@@ -661,6 +665,72 @@
},
},
},
{
name: "probe selector test",
probes: []*monitoringv1.Probe{
{
ObjectMeta: metav1.ObjectMeta{
Name: "probe-test-1",
Namespace: "test",
Labels: map[string]string{
"testpod": "testpod",
},
},
Spec: monitoringv1.ProbeSpec{
JobName: "probe/test/probe-1/0",
ProberSpec: monitoringv1.ProberSpec{
URL: "localhost:50671",
Path: "/metrics",
},
Targets: monitoringv1.ProbeTargets{
StaticConfig: &monitoringv1.ProbeTargetStaticConfig{
Targets: []string{"prometheus.io"},
},
},
},
},
},
cfg: allocatorconfig.Config{
PrometheusCR: allocatorconfig.PrometheusCRConfig{
ProbeSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"testpod": "testpod",
},
},
},
},
want: &promconfig.Config{
ScrapeConfigs: []*promconfig.ScrapeConfig{
{
JobName: "probe/test/probe-test-1",
ScrapeInterval: model.Duration(30 * time.Second),
ScrapeProtocols: defaultScrapeProtocols,
ScrapeTimeout: model.Duration(10 * time.Second),
HonorTimestamps: true,
HonorLabels: false,
Scheme: "http",
MetricsPath: "/metrics",
ServiceDiscoveryConfigs: []discovery.Config{
discovery.StaticConfig{
&targetgroup.Group{
Targets: []model.LabelSet{
map[model.LabelName]model.LabelValue{
"__address__": "prometheus.io",
},
},
Labels: map[model.LabelName]model.LabelValue{
"namespace": "test",
},
Source: "0",
},
},
},
HTTPClientConfig: config.DefaultHTTPClientConfig,
EnableCompression: true,
},
},
},
},
{
name: "service monitor namespace selector test",
serviceMonitors: []*monitoringv1.ServiceMonitor{
@@ -804,7 +874,7 @@
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
w, _ := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.cfg)
w, _ := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.probes, tt.scrapeConfigs, tt.cfg)

// Start namespace informers in order to populate cache.
go w.nsInformer.Run(w.stopChannel)
@@ -909,7 +979,7 @@ func TestNamespaceLabelUpdate(t *testing.T) {
ScrapeConfigs: []*promconfig.ScrapeConfig{},
}

w, source := getTestPrometheusCRWatcher(t, nil, podMonitors, cfg)
w, source := getTestPrometheusCRWatcher(t, nil, podMonitors, nil, nil, cfg)
events := make(chan Event, 1)
eventInterval := 5 * time.Millisecond

@@ -975,7 +1045,7 @@ func TestRateLimit(t *testing.T) {
eventInterval := 5 * time.Millisecond
cfg := allocatorconfig.Config{}

w, _ := getTestPrometheusCRWatcher(t, nil, nil, cfg)
w, _ := getTestPrometheusCRWatcher(t, nil, nil, nil, nil, cfg)
defer w.Close()
w.eventInterval = eventInterval

@@ -1038,7 +1108,7 @@

// getTestPrometheusCRWatcher creates a test instance of PrometheusCRWatcher with fake clients
// and test secrets.
func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, cfg allocatorconfig.Config) (*PrometheusCRWatcher, *fcache.FakeControllerSource) {
func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, probes []*monitoringv1.Probe, scrapeConfigs []*promv1alpha1.ScrapeConfig, cfg allocatorconfig.Config) (*PrometheusCRWatcher, *fcache.FakeControllerSource) {
mClient := fakemonitoringclient.NewSimpleClientset()
for _, sm := range svcMonitors {
if sm != nil {
@@ -1057,6 +1127,24 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
}
}

for _, prb := range probes {
if prb != nil {
_, err := mClient.MonitoringV1().Probes(prb.Namespace).Create(context.Background(), prb, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}
}

for _, scc := range scrapeConfigs {
if scc != nil {
_, err := mClient.MonitoringV1alpha1().ScrapeConfigs(scc.Namespace).Create(context.Background(), scc, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}
}

k8sClient := fake.NewSimpleClientset()
_, err := k8sClient.CoreV1().Secrets("test").Create(context.Background(), &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -1091,8 +1179,12 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
ScrapeInterval: monitoringv1.Duration("30s"),
ServiceMonitorSelector: cfg.PrometheusCR.ServiceMonitorSelector,
PodMonitorSelector: cfg.PrometheusCR.PodMonitorSelector,
ProbeSelector: cfg.PrometheusCR.ProbeSelector,
ScrapeConfigSelector: cfg.PrometheusCR.ScrapeConfigSelector,
ServiceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
PodMonitorNamespaceSelector: cfg.PrometheusCR.PodMonitorNamespaceSelector,
ProbeNamespaceSelector: cfg.PrometheusCR.ProbeNamespaceSelector,
ScrapeConfigNamespaceSelector: cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
},
},
}
@@ -1132,6 +1224,8 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
configGenerator: generator,
podMonitorNamespaceSelector: cfg.PrometheusCR.PodMonitorNamespaceSelector,
serviceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
probeNamespaceSelector: cfg.PrometheusCR.ProbeNamespaceSelector,
scrapeConfigNamespaceSelector: cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
resourceSelector: resourceSelector,
store: store,
}, source
2 changes: 2 additions & 0 deletions config/rbac/role.yaml
@@ -99,6 +99,8 @@ rules:
- monitoring.coreos.com
resources:
- podmonitors
- probes
- scrapeconfigs
- servicemonitors
verbs:
- create
7 changes: 6 additions & 1 deletion controllers/opentelemetrycollector_controller.go
@@ -23,6 +23,7 @@ import (
"github.com/go-logr/logr"
routev1 "github.com/openshift/api/route/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
corev1 "k8s.io/api/core/v1"
@@ -90,6 +91,8 @@ func (r *OpenTelemetryCollectorReconciler) findOtelOwnedObjects(ctx context.Cont
ownedObjectTypes = append(ownedObjectTypes,
&monitoringv1.ServiceMonitor{},
&monitoringv1.PodMonitor{},
&monitoringv1.Probe{},
&promv1alpha1.ScrapeConfig{},
)
}
if params.Config.OpenShiftRoutesAvailability() == openshift.RoutesAvailable {
@@ -205,7 +208,7 @@ func NewReconciler(p Params) *OpenTelemetryCollectorReconciler {
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;podmonitors,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;podmonitors;scrapeconfigs;probes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes;routes/custom-host,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures;infrastructures/status,verbs=get;list;watch
@@ -311,6 +314,8 @@ func (r *OpenTelemetryCollectorReconciler) SetupWithManager(mgr ctrl.Manager) er
if featuregate.PrometheusOperatorIsAvailable.IsEnabled() && r.config.PrometheusCRAvailability() == prometheus.Available {
builder.Owns(&monitoringv1.ServiceMonitor{})
builder.Owns(&monitoringv1.PodMonitor{})
builder.Owns(&monitoringv1.Probe{})
builder.Owns(&promv1alpha1.ScrapeConfig{})
}
if r.config.OpenShiftRoutesAvailability() == openshift.RoutesAvailable {
builder.Owns(&routev1.Route{})
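Because the reconciler now registers ownership of Probe and ScrapeConfig objects as well, collector-generated resources of these kinds are watched alongside ServiceMonitors and PodMonitors. A minimal ScrapeConfig of the sort that would fall under this ownership might look like the sketch below (name, namespace, and target are invented for the example):

apiVersion: monitoring.coreos.com/v1alpha1
kind: ScrapeConfig
metadata:
  name: example-scrape-config
  namespace: test
spec:
  staticConfigs:
    - targets:
        - prometheus.io:9090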
4 changes: 3 additions & 1 deletion controllers/suite_test.go
@@ -27,6 +27,7 @@ import (

routev1 "github.com/openshift/api/route/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
Expand Down Expand Up @@ -133,14 +134,15 @@ func TestMain(m *testing.M) {
}

utilruntime.Must(monitoringv1.AddToScheme(testScheme))
utilruntime.Must(promv1alpha1.AddToScheme(testScheme))
utilruntime.Must(networkingv1.AddToScheme(testScheme))
utilruntime.Must(routev1.AddToScheme(testScheme))
utilruntime.Must(v1alpha1.AddToScheme(testScheme))
utilruntime.Must(v1beta1.AddToScheme(testScheme))

testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
CRDs: []*apiextensionsv1.CustomResourceDefinition{testdata.OpenShiftRouteCRD, testdata.ServiceMonitorCRD, testdata.PodMonitorCRD},
CRDs: []*apiextensionsv1.CustomResourceDefinition{testdata.OpenShiftRouteCRD, testdata.ServiceMonitorCRD, testdata.PodMonitorCRD, testdata.ProbeCRD, testdata.ScrapeConfigCRD},
WebhookInstallOptions: envtest.WebhookInstallOptions{
Paths: []string{filepath.Join("..", "config", "webhook")},
},