From 2d11eff30b60ce903d098dbbbfc49b89548c810c Mon Sep 17 00:00:00 2001
From: Andrzej Stencel
Date: Wed, 6 Dec 2023 15:29:48 +0100
Subject: [PATCH 1/2] fix(metrics): use `sumologic.metrics.excludeNamespaceRegex` instead of `sumologic.logs.container.excludeNamespaceRegex`

---
 .changelog/3428.fixed.txt                     |   1 +
 deploy/helm/sumologic/README.md               |   1 +
 .../conf/metrics/otelcol/processors.yaml      |   2 +-
 .../sumologic/templates/_helpers/_metrics.tpl |  19 ++
 deploy/helm/sumologic/values.yaml             |   4 +
 .../exclude-namespace.input.yaml              |   4 +
 .../exclude-namespace.output.yaml             | 201 +++++++++++++++++
 .../metadata_metrics_otc/templates.input.yaml |  15 ++
 .../templates.output.yaml                     | 212 ++++++++++++++++++
 9 files changed, 458 insertions(+), 1 deletion(-)
 create mode 100644 .changelog/3428.fixed.txt
 create mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.input.yaml
 create mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.output.yaml
 create mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.input.yaml
 create mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.output.yaml

diff --git a/.changelog/3428.fixed.txt b/.changelog/3428.fixed.txt
new file mode 100644
index 0000000000..4428fc1d24
--- /dev/null
+++ b/.changelog/3428.fixed.txt
@@ -0,0 +1 @@
+fix(metrics): use `sumologic.metrics.excludeNamespaceRegex` instead of `sumologic.logs.container.excludeNamespaceRegex`
\ No newline at end of file
diff --git a/deploy/helm/sumologic/README.md b/deploy/helm/sumologic/README.md
index 0f80f8671d..ae52cb0ae5 100644
--- a/deploy/helm/sumologic/README.md
+++ b/deploy/helm/sumologic/README.md
@@ -86,6 +86,7 @@ The following table lists the configurable parameters of the Sumo Logic chart an
 | `sumologic.logs.additionalFields` | Additional Fields to be created in Sumo Logic. [Sumo Logic help](https://help.sumologic.com/docs/manage/fields/#manage-fields) | `[]` |
 | `sumologic.logs.sourceType` | The type of the Sumo Logic source being used for logs ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `otlp` |
 | `sumologic.metrics.enabled` | Set the enabled flag to false for disabling metrics ingestion altogether. | `true` |
+| `sumologic.metrics.excludeNamespaceRegex` | A regular expression for Kubernetes namespace names. Metrics from namespaces matching this regex will not be sent to Sumo Logic. | `""` |
 | `sumologic.metrics.otelcol.extraProcessors` | Extra processors configuration for metrics pipeline. See [/docs/collecting-application-metrics.md#metrics-modifications](/docs/collecting-application-metrics.md#metrics-modifications) for more information. | `[]` |
 | `sumologic.metrics.remoteWriteProxy.enabled` | Enable a load balancing proxy for Prometheus remote writes. [See docs for more information.](/docs/prometheus.md#using-a-load-balancing-proxy-for-prometheus-remote-write) | `false` |
 | `sumologic.metrics.remoteWriteProxy.config.clientBodyBufferSize` | See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write. | `64k` |
diff --git a/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml b/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml
index 8d39f501a0..28ced10baa 100644
--- a/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml
+++ b/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml
@@ -175,7 +175,7 @@ routing:
 source:
   collector: {{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }}
   exclude:
-    k8s.namespace.name: {{ include "logs.excludeNamespaces" . }}
+    k8s.namespace.name: {{ include "metrics.excludeNamespaces" . }}
 
 ## The Sumo Logic Schema processor modifies the metadata on logs, metrics and traces sent to Sumo Logic
 ## so that the Sumo Logic apps can make full use of the ingested data.
diff --git a/deploy/helm/sumologic/templates/_helpers/_metrics.tpl b/deploy/helm/sumologic/templates/_helpers/_metrics.tpl
index 1c795c9bf0..87ce4b980c 100644
--- a/deploy/helm/sumologic/templates/_helpers/_metrics.tpl
+++ b/deploy/helm/sumologic/templates/_helpers/_metrics.tpl
@@ -309,3 +309,22 @@ Example Usage:
 {{- define "metrics.collector.autoscaling.enabled" -}}
 {{- template "is.autoscaling.enabled" (dict "autoscalingEnabled" .Values.sumologic.metrics.collector.otelcol.autoscaling.enabled "Values" .Values) -}}
 {{- end -}}
+
+{{/*
+Returns the regex of namespaces to exclude from metrics collection
+
+Example:
+
+{{ include "metrics.excludeNamespaces" . }}
+*/}}
+{{- define "metrics.excludeNamespaces" -}}
+{{- $excludeNamespaceRegex := .Values.sumologic.metrics.excludeNamespaceRegex | quote -}}
+{{- if eq .Values.sumologic.collectionMonitoring false -}}
+  {{- if .Values.sumologic.metrics.excludeNamespaceRegex -}}
+    {{- $excludeNamespaceRegex = printf "%s|%s" ( include "sumologic.namespace" . ) .Values.sumologic.metrics.excludeNamespaceRegex | quote -}}
+  {{- else -}}
+    {{- $excludeNamespaceRegex = printf "%s" ( include "sumologic.namespace" . ) | quote -}}
+  {{- end -}}
+{{- end -}}
+{{ print $excludeNamespaceRegex }}
+{{- end -}}
diff --git a/deploy/helm/sumologic/values.yaml b/deploy/helm/sumologic/values.yaml
index 7e1252f2eb..32c28ee03e 100644
--- a/deploy/helm/sumologic/values.yaml
+++ b/deploy/helm/sumologic/values.yaml
@@ -552,6 +552,10 @@ sumologic:
     ## - kubelet_runtime_operations_duration_seconds
     dropHistogramBuckets: true
 
+    ## A regular expression for Kubernetes namespace names.
+    ## Metrics from namespaces matching this regex will not be sent to Sumo Logic.
+    excludeNamespaceRegex: ""
+
     otelcol:
       ## Includes additional processors into pipelines.
      ## It can be used for filtering metrics, renaming, changing metadata and so on.
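
The golden files and Go tests that follow exercise the `metrics.excludeNamespaces` helper added above. As a minimal sketch of the expected behaviour, based on those fixtures (the `sumologic` release namespace comes from the test data), user values such as

  # values.yaml (user-supplied overrides, taken from exclude-namespace.input.yaml)
  sumologic:
    collectionMonitoring: false   # drop the collection's own telemetry
    metrics:
      excludeNamespaceRegex: my_metrics_excludeNamespaceRegex

should render the metrics otelcol `source` processor as

  # rendered fragment of the metrics otelcol config (see exclude-namespace.output.yaml)
  source:
    exclude:
      k8s.namespace.name: sumologic|my_metrics_excludeNamespaceRegex

that is, the chart's own namespace is prepended to the user regex only when `sumologic.collectionMonitoring` is `false`; when it is not set to `false`, only the user-supplied regex is used (see templates.output.yaml).
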
diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.input.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.input.yaml new file mode 100644 index 0000000000..d6732648cf --- /dev/null +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.input.yaml @@ -0,0 +1,4 @@ +sumologic: + collectionMonitoring: false + metrics: + excludeNamespaceRegex: my_metrics_excludeNamespaceRegex diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.output.yaml new file mode 100644 index 0000000000..639435ceba --- /dev/null +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.output.yaml @@ -0,0 +1,201 @@ +--- +# Source: sumologic/templates/metrics/otelcol/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-otelcol-metrics + namespace: sumologic + labels: + app: RELEASE-NAME-sumologic-otelcol-metrics + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + config.yaml: | + exporters: + sumologic/default: + decompose_otlp_histograms: true + endpoint: ${SUMO_ENDPOINT_DEFAULT_OTLP_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: otlp + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + extensions: + file_storage: + compaction: + directory: /tmp + on_rebound: true + directory: /var/lib/storage/otc + timeout: 10s + health_check: {} + pprof: {} + processors: + batch: + send_batch_max_size: 2048 + send_batch_size: 1024 + timeout: 1s + filter/drop_unnecessary_metrics: + error_mode: ignore + metrics: + metric: + - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") + - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + groupbyattrs: + keys: + - container + - namespace + - pod + - service + groupbyattrs/group_by_name: + keys: + - __name__ + - job + k8s_tagger: + extract: + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - daemonSetName + - deploymentName + - nodeName + - replicaSetName + - serviceName + - statefulSetName + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 75 + spike_limit_percentage: 20 + metricstransform: + transforms: + action: update + include: ^prometheus_remote_write_(.*)$$ + match_type: regexp + new_name: $$1 + resource: + attributes: + - action: upsert + from_attribute: namespace + key: k8s.namespace.name + - action: delete + key: namespace + - action: upsert + from_attribute: pod + key: k8s.pod.name + - action: delete + key: pod + - action: upsert + from_attribute: container + key: k8s.container.name + - action: delete + key: container + - action: upsert + from_attribute: node + key: k8s.node.name + - action: delete + key: node + - action: upsert + from_attribute: service + key: prometheus_service + - action: delete + key: service + - action: upsert + from_attribute: service.name + key: job + - action: delete + key: service.name + - action: upsert + key: _origin + value: kubernetes + - action: upsert + key: cluster + value: kubernetes + resource/delete_source_metadata: + attributes: + - action: delete + key: _sourceCategory + - action: delete + key: _sourceHost + - action: delete + key: 
_sourceName + resource/remove_k8s_pod_pod_name: + attributes: + - action: delete + key: k8s.pod.pod_name + source: + collector: kubernetes + exclude: + k8s.namespace.name: sumologic|my_metrics_excludeNamespaceRegex + sumologic_schema: + add_cloud_namespace: false + transform/remove_name: + error_mode: ignore + metric_statements: + - context: resource + statements: + - delete_key(attributes, "__name__") + transform/set_name: + error_mode: ignore + metric_statements: + - context: datapoint + statements: + - set(attributes["__name__"], metric.name) where IsMatch(metric.name, "^cloudprovider_.*") + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + telegraf: + agent_config: | + [agent] + interval = "30s" + flush_interval = "30s" + omit_hostname = true + [[inputs.http_listener_v2]] + # wait longer than prometheus + read_timeout = "30s" + write_timeout = "30s" + service_address = ":9888" + data_format = "prometheusremotewrite" + paths = [ + "/prometheus.metrics" + ] + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + metrics: + exporters: + - sumologic/default + processors: + - memory_limiter + - metricstransform + - groupbyattrs + - resource + - k8s_tagger + - source + - sumologic_schema + - resource/remove_k8s_pod_pod_name + - resource/delete_source_metadata + - transform/set_name + - groupbyattrs/group_by_name + - transform/remove_name + - filter/drop_unnecessary_metrics + - batch + receivers: + - telegraf + - otlp + telemetry: + logs: + level: info diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.input.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.input.yaml new file mode 100644 index 0000000000..572ef01a47 --- /dev/null +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.input.yaml @@ -0,0 +1,15 @@ +sumologic: + collectorName: my_collectorName + metrics: + excludeNamespaceRegex: my_metrics_excludeNamespaceRegex + otelcol: + extraProcessors: + - resource/add-metrics-resource-attribute: + attributes: + - action: insert + key: environment + value: staging + - resource/remove-metrics-resource-attribute: + attributes: + - action: delete + key: redundant-attribute diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.output.yaml new file mode 100644 index 0000000000..e64b57d252 --- /dev/null +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.output.yaml @@ -0,0 +1,212 @@ +--- +# Source: sumologic/templates/metrics/otelcol/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-otelcol-metrics + namespace: sumologic + labels: + app: RELEASE-NAME-sumologic-otelcol-metrics + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + config.yaml: | + exporters: + sumologic/default: + decompose_otlp_histograms: true + endpoint: ${SUMO_ENDPOINT_DEFAULT_OTLP_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: otlp + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + extensions: + file_storage: + compaction: + directory: /tmp + on_rebound: true + directory: /var/lib/storage/otc + timeout: 10s + health_check: {} + pprof: {} + processors: + batch: + send_batch_max_size: 2048 + send_batch_size: 1024 + timeout: 1s + filter/drop_unnecessary_metrics: + error_mode: ignore + metrics: + metric: + - resource.attributes["job"] != "pod-annotations" 
and IsMatch(name, "scrape_.*") + - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + groupbyattrs: + keys: + - container + - namespace + - pod + - service + groupbyattrs/group_by_name: + keys: + - __name__ + - job + k8s_tagger: + extract: + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - daemonSetName + - deploymentName + - nodeName + - replicaSetName + - serviceName + - statefulSetName + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 75 + spike_limit_percentage: 20 + metricstransform: + transforms: + action: update + include: ^prometheus_remote_write_(.*)$$ + match_type: regexp + new_name: $$1 + resource: + attributes: + - action: upsert + from_attribute: namespace + key: k8s.namespace.name + - action: delete + key: namespace + - action: upsert + from_attribute: pod + key: k8s.pod.name + - action: delete + key: pod + - action: upsert + from_attribute: container + key: k8s.container.name + - action: delete + key: container + - action: upsert + from_attribute: node + key: k8s.node.name + - action: delete + key: node + - action: upsert + from_attribute: service + key: prometheus_service + - action: delete + key: service + - action: upsert + from_attribute: service.name + key: job + - action: delete + key: service.name + - action: upsert + key: _origin + value: kubernetes + - action: upsert + key: cluster + value: kubernetes + resource/add-metrics-resource-attribute: + attributes: + - action: insert + key: environment + value: staging + resource/delete_source_metadata: + attributes: + - action: delete + key: _sourceCategory + - action: delete + key: _sourceHost + - action: delete + key: _sourceName + resource/remove-metrics-resource-attribute: + attributes: + - action: delete + key: redundant-attribute + resource/remove_k8s_pod_pod_name: + attributes: + - action: delete + key: k8s.pod.pod_name + source: + collector: my_collectorName + exclude: + k8s.namespace.name: my_metrics_excludeNamespaceRegex + sumologic_schema: + add_cloud_namespace: false + transform/remove_name: + error_mode: ignore + metric_statements: + - context: resource + statements: + - delete_key(attributes, "__name__") + transform/set_name: + error_mode: ignore + metric_statements: + - context: datapoint + statements: + - set(attributes["__name__"], metric.name) where IsMatch(metric.name, "^cloudprovider_.*") + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + telegraf: + agent_config: | + [agent] + interval = "30s" + flush_interval = "30s" + omit_hostname = true + [[inputs.http_listener_v2]] + # wait longer than prometheus + read_timeout = "30s" + write_timeout = "30s" + service_address = ":9888" + data_format = "prometheusremotewrite" + paths = [ + "/prometheus.metrics" + ] + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + metrics: + exporters: + - sumologic/default + processors: + - memory_limiter + - metricstransform + - groupbyattrs + - resource + - k8s_tagger + - source + - sumologic_schema + - resource/add-metrics-resource-attribute + - resource/remove-metrics-resource-attribute + - resource/remove_k8s_pod_pod_name + - resource/delete_source_metadata + - transform/set_name + - groupbyattrs/group_by_name + - transform/remove_name + - filter/drop_unnecessary_metrics + - batch + receivers: + - telegraf + - otlp + telemetry: + logs: + level: info From 
2dfc1b4882aba23bb3836f3de1cae16d6115a833 Mon Sep 17 00:00:00 2001 From: Andrzej Stencel Date: Thu, 7 Dec 2023 10:42:58 +0100 Subject: [PATCH 2/2] test: rewrite tests in Golang --- tests/helm/logs_test.go | 77 +++++++ tests/helm/metrics_test.go | 74 ++++++ .../exclude-namespace.input.yaml | 4 - .../exclude-namespace.output.yaml | 201 ----------------- .../metadata_metrics_otc/templates.input.yaml | 15 -- .../templates.output.yaml | 212 ------------------ 6 files changed, 151 insertions(+), 432 deletions(-) delete mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.input.yaml delete mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.output.yaml delete mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.input.yaml delete mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.output.yaml diff --git a/tests/helm/logs_test.go b/tests/helm/logs_test.go index 45c0bc1985..4654a9c0fe 100644 --- a/tests/helm/logs_test.go +++ b/tests/helm/logs_test.go @@ -488,3 +488,80 @@ sumologic: } require.True(t, keepTimeOperatorFound) } + +func TestLogsCollectionMonitoring(t *testing.T) { + t.Parallel() + templatePath := "templates/logs/otelcol/configmap.yaml" + valuesYaml := ` +sumologic: + collectionMonitoring: false +` + otelConfigYaml := GetOtelConfigYaml(t, valuesYaml, templatePath) + + var otelConfig struct { + Processors struct { + SourceContainers struct { + Exclude struct { + Namespace string + } + } `yaml:"source/containers"` + } + } + err := yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) + require.NoError(t, err) + + require.Equal(t, "sumologic", otelConfig.Processors.SourceContainers.Exclude.Namespace) +} + +func TestLogsExcludeNamespaceRegex(t *testing.T) { + t.Parallel() + templatePath := "templates/logs/otelcol/configmap.yaml" + valuesYaml := ` +sumologic: + logs: + container: + excludeNamespaceRegex: my_logs_namespace +` + otelConfigYaml := GetOtelConfigYaml(t, valuesYaml, templatePath) + + var otelConfig struct { + Processors struct { + SourceContainers struct { + Exclude struct { + Namespace string + } + } `yaml:"source/containers"` + } + } + err := yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) + require.NoError(t, err) + + require.Equal(t, "my_logs_namespace", otelConfig.Processors.SourceContainers.Exclude.Namespace) +} + +func TestLogsExcludeNamespaceRegexWithCollectionMonitoring(t *testing.T) { + t.Parallel() + templatePath := "templates/logs/otelcol/configmap.yaml" + valuesYaml := ` +sumologic: + collectionMonitoring: false + logs: + container: + excludeNamespaceRegex: my_logs_namespace +` + otelConfigYaml := GetOtelConfigYaml(t, valuesYaml, templatePath) + + var otelConfig struct { + Processors struct { + SourceContainers struct { + Exclude struct { + Namespace string + } + } `yaml:"source/containers"` + } + } + err := yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) + require.NoError(t, err) + + require.Equal(t, "sumologic|my_logs_namespace", otelConfig.Processors.SourceContainers.Exclude.Namespace) +} diff --git a/tests/helm/metrics_test.go b/tests/helm/metrics_test.go index 54ff9b7115..a68d6e7db9 100644 --- a/tests/helm/metrics_test.go +++ b/tests/helm/metrics_test.go @@ -332,5 +332,79 @@ sumologic: assert.Equal(t, tt.ExpectedNames, names) }) } +} + +func TestMetricsCollectionMonitoring(t *testing.T) { + t.Parallel() + templatePath := "templates/metrics/otelcol/configmap.yaml" + valuesYaml := ` +sumologic: + collectionMonitoring: false +` + otelConfigYaml := 
GetOtelConfigYaml(t, valuesYaml, templatePath) + + var otelConfig struct { + Processors struct { + Source struct { + Exclude struct { + K8sNamespaceName string `yaml:"k8s.namespace.name"` + } + } + } + } + err := yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) + require.NoError(t, err) + + require.Equal(t, "sumologic", otelConfig.Processors.Source.Exclude.K8sNamespaceName) +} + +func TestMetricsExcludeNamespaceRegex(t *testing.T) { + t.Parallel() + templatePath := "templates/metrics/otelcol/configmap.yaml" + valuesYaml := ` +sumologic: + metrics: + excludeNamespaceRegex: my_metrics_namespace +` + otelConfigYaml := GetOtelConfigYaml(t, valuesYaml, templatePath) + + var otelConfig struct { + Processors struct { + Source struct { + Exclude struct { + K8sNamespaceName string `yaml:"k8s.namespace.name"` + } + } + } + } + err := yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) + require.NoError(t, err) + + require.Equal(t, "my_metrics_namespace", otelConfig.Processors.Source.Exclude.K8sNamespaceName) +} + +func TestMetricsExcludeNamespaceRegexWithCollectionMonitoring(t *testing.T) { + t.Parallel() + templatePath := "templates/metrics/otelcol/configmap.yaml" + valuesYaml := ` +sumologic: + collectionMonitoring: false + metrics: + excludeNamespaceRegex: my_metrics_namespace +` + otelConfigYaml := GetOtelConfigYaml(t, valuesYaml, templatePath) + + var otelConfig struct { + Processors struct { + Source struct { + Exclude struct { + K8sNamespaceName string `yaml:"k8s.namespace.name"` + } + } + } + } + err := yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) + require.NoError(t, err) + require.Equal(t, "sumologic|my_metrics_namespace", otelConfig.Processors.Source.Exclude.K8sNamespaceName) } diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.input.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.input.yaml deleted file mode 100644 index d6732648cf..0000000000 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.input.yaml +++ /dev/null @@ -1,4 +0,0 @@ -sumologic: - collectionMonitoring: false - metrics: - excludeNamespaceRegex: my_metrics_excludeNamespaceRegex diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.output.yaml deleted file mode 100644 index 639435ceba..0000000000 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/exclude-namespace.output.yaml +++ /dev/null @@ -1,201 +0,0 @@ ---- -# Source: sumologic/templates/metrics/otelcol/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: RELEASE-NAME-sumologic-otelcol-metrics - namespace: sumologic - labels: - app: RELEASE-NAME-sumologic-otelcol-metrics - chart: "sumologic-%CURRENT_CHART_VERSION%" - release: "RELEASE-NAME" - heritage: "Helm" -data: - config.yaml: | - exporters: - sumologic/default: - decompose_otlp_histograms: true - endpoint: ${SUMO_ENDPOINT_DEFAULT_OTLP_METRICS_SOURCE} - max_request_body_size: 16777216 - metric_format: otlp - sending_queue: - enabled: true - num_consumers: 10 - queue_size: 10000 - storage: file_storage - timeout: 30s - extensions: - file_storage: - compaction: - directory: /tmp - on_rebound: true - directory: /var/lib/storage/otc - timeout: 10s - health_check: {} - pprof: {} - processors: - batch: - send_batch_max_size: 2048 - send_batch_size: 1024 - timeout: 1s - filter/drop_unnecessary_metrics: - error_mode: ignore - metrics: - metric: - - resource.attributes["job"] != "pod-annotations" 
and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") - groupbyattrs: - keys: - - container - - namespace - - pod - - service - groupbyattrs/group_by_name: - keys: - - __name__ - - job - k8s_tagger: - extract: - delimiter: _ - labels: - - key: '*' - tag_name: pod_labels_%s - metadata: - - daemonSetName - - deploymentName - - nodeName - - replicaSetName - - serviceName - - statefulSetName - owner_lookup_enabled: true - passthrough: false - pod_association: - - from: build_hostname - memory_limiter: - check_interval: 5s - limit_percentage: 75 - spike_limit_percentage: 20 - metricstransform: - transforms: - action: update - include: ^prometheus_remote_write_(.*)$$ - match_type: regexp - new_name: $$1 - resource: - attributes: - - action: upsert - from_attribute: namespace - key: k8s.namespace.name - - action: delete - key: namespace - - action: upsert - from_attribute: pod - key: k8s.pod.name - - action: delete - key: pod - - action: upsert - from_attribute: container - key: k8s.container.name - - action: delete - key: container - - action: upsert - from_attribute: node - key: k8s.node.name - - action: delete - key: node - - action: upsert - from_attribute: service - key: prometheus_service - - action: delete - key: service - - action: upsert - from_attribute: service.name - key: job - - action: delete - key: service.name - - action: upsert - key: _origin - value: kubernetes - - action: upsert - key: cluster - value: kubernetes - resource/delete_source_metadata: - attributes: - - action: delete - key: _sourceCategory - - action: delete - key: _sourceHost - - action: delete - key: _sourceName - resource/remove_k8s_pod_pod_name: - attributes: - - action: delete - key: k8s.pod.pod_name - source: - collector: kubernetes - exclude: - k8s.namespace.name: sumologic|my_metrics_excludeNamespaceRegex - sumologic_schema: - add_cloud_namespace: false - transform/remove_name: - error_mode: ignore - metric_statements: - - context: resource - statements: - - delete_key(attributes, "__name__") - transform/set_name: - error_mode: ignore - metric_statements: - - context: datapoint - statements: - - set(attributes["__name__"], metric.name) where IsMatch(metric.name, "^cloudprovider_.*") - receivers: - otlp: - protocols: - http: - endpoint: 0.0.0.0:4318 - telegraf: - agent_config: | - [agent] - interval = "30s" - flush_interval = "30s" - omit_hostname = true - [[inputs.http_listener_v2]] - # wait longer than prometheus - read_timeout = "30s" - write_timeout = "30s" - service_address = ":9888" - data_format = "prometheusremotewrite" - paths = [ - "/prometheus.metrics" - ] - service: - extensions: - - health_check - - file_storage - - pprof - pipelines: - metrics: - exporters: - - sumologic/default - processors: - - memory_limiter - - metricstransform - - groupbyattrs - - resource - - k8s_tagger - - source - - sumologic_schema - - resource/remove_k8s_pod_pod_name - - resource/delete_source_metadata - - transform/set_name - - groupbyattrs/group_by_name - - transform/remove_name - - filter/drop_unnecessary_metrics - - batch - receivers: - - telegraf - - otlp - telemetry: - logs: - level: info diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.input.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.input.yaml deleted file mode 100644 index 572ef01a47..0000000000 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.input.yaml +++ /dev/null @@ 
-1,15 +0,0 @@ -sumologic: - collectorName: my_collectorName - metrics: - excludeNamespaceRegex: my_metrics_excludeNamespaceRegex - otelcol: - extraProcessors: - - resource/add-metrics-resource-attribute: - attributes: - - action: insert - key: environment - value: staging - - resource/remove-metrics-resource-attribute: - attributes: - - action: delete - key: redundant-attribute diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.output.yaml deleted file mode 100644 index e64b57d252..0000000000 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/templates.output.yaml +++ /dev/null @@ -1,212 +0,0 @@ ---- -# Source: sumologic/templates/metrics/otelcol/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: RELEASE-NAME-sumologic-otelcol-metrics - namespace: sumologic - labels: - app: RELEASE-NAME-sumologic-otelcol-metrics - chart: "sumologic-%CURRENT_CHART_VERSION%" - release: "RELEASE-NAME" - heritage: "Helm" -data: - config.yaml: | - exporters: - sumologic/default: - decompose_otlp_histograms: true - endpoint: ${SUMO_ENDPOINT_DEFAULT_OTLP_METRICS_SOURCE} - max_request_body_size: 16777216 - metric_format: otlp - sending_queue: - enabled: true - num_consumers: 10 - queue_size: 10000 - storage: file_storage - timeout: 30s - extensions: - file_storage: - compaction: - directory: /tmp - on_rebound: true - directory: /var/lib/storage/otc - timeout: 10s - health_check: {} - pprof: {} - processors: - batch: - send_batch_max_size: 2048 - send_batch_size: 1024 - timeout: 1s - filter/drop_unnecessary_metrics: - error_mode: ignore - metrics: - metric: - - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") - groupbyattrs: - keys: - - container - - namespace - - pod - - service - groupbyattrs/group_by_name: - keys: - - __name__ - - job - k8s_tagger: - extract: - delimiter: _ - labels: - - key: '*' - tag_name: pod_labels_%s - metadata: - - daemonSetName - - deploymentName - - nodeName - - replicaSetName - - serviceName - - statefulSetName - owner_lookup_enabled: true - passthrough: false - pod_association: - - from: build_hostname - memory_limiter: - check_interval: 5s - limit_percentage: 75 - spike_limit_percentage: 20 - metricstransform: - transforms: - action: update - include: ^prometheus_remote_write_(.*)$$ - match_type: regexp - new_name: $$1 - resource: - attributes: - - action: upsert - from_attribute: namespace - key: k8s.namespace.name - - action: delete - key: namespace - - action: upsert - from_attribute: pod - key: k8s.pod.name - - action: delete - key: pod - - action: upsert - from_attribute: container - key: k8s.container.name - - action: delete - key: container - - action: upsert - from_attribute: node - key: k8s.node.name - - action: delete - key: node - - action: upsert - from_attribute: service - key: prometheus_service - - action: delete - key: service - - action: upsert - from_attribute: service.name - key: job - - action: delete - key: service.name - - action: upsert - key: _origin - value: kubernetes - - action: upsert - key: cluster - value: kubernetes - resource/add-metrics-resource-attribute: - attributes: - - action: insert - key: environment - value: staging - resource/delete_source_metadata: - attributes: - - action: delete - key: _sourceCategory - - action: delete - key: _sourceHost - - action: 
delete - key: _sourceName - resource/remove-metrics-resource-attribute: - attributes: - - action: delete - key: redundant-attribute - resource/remove_k8s_pod_pod_name: - attributes: - - action: delete - key: k8s.pod.pod_name - source: - collector: my_collectorName - exclude: - k8s.namespace.name: my_metrics_excludeNamespaceRegex - sumologic_schema: - add_cloud_namespace: false - transform/remove_name: - error_mode: ignore - metric_statements: - - context: resource - statements: - - delete_key(attributes, "__name__") - transform/set_name: - error_mode: ignore - metric_statements: - - context: datapoint - statements: - - set(attributes["__name__"], metric.name) where IsMatch(metric.name, "^cloudprovider_.*") - receivers: - otlp: - protocols: - http: - endpoint: 0.0.0.0:4318 - telegraf: - agent_config: | - [agent] - interval = "30s" - flush_interval = "30s" - omit_hostname = true - [[inputs.http_listener_v2]] - # wait longer than prometheus - read_timeout = "30s" - write_timeout = "30s" - service_address = ":9888" - data_format = "prometheusremotewrite" - paths = [ - "/prometheus.metrics" - ] - service: - extensions: - - health_check - - file_storage - - pprof - pipelines: - metrics: - exporters: - - sumologic/default - processors: - - memory_limiter - - metricstransform - - groupbyattrs - - resource - - k8s_tagger - - source - - sumologic_schema - - resource/add-metrics-resource-attribute - - resource/remove-metrics-resource-attribute - - resource/remove_k8s_pod_pod_name - - resource/delete_source_metadata - - transform/set_name - - groupbyattrs/group_by_name - - transform/remove_name - - filter/drop_unnecessary_metrics - - batch - receivers: - - telegraf - - otlp - telemetry: - logs: - level: info
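
Taken together, the two commits make namespace exclusion for metrics independent of the logs setting that was previously (and incorrectly) applied to the metrics pipeline. A hedged example of how a user would configure both after this change — the `dev-.*` values are placeholders, not defaults:

  # values.yaml (user-supplied overrides)
  sumologic:
    logs:
      container:
        ## applies to container logs only
        excludeNamespaceRegex: "dev-.*"
    metrics:
      ## now controls which namespaces are excluded from metrics
      excludeNamespaceRegex: "dev-.*"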