From d8dbd0f914dd72cd57978f2320d486d4bdcf1ca4 Mon Sep 17 00:00:00 2001
From: Tim Rühsen
Date: Tue, 18 Feb 2025 12:24:35 +0100
Subject: [PATCH] Add intrange linter
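
Enable the intrange linter in golangci-lint and convert counting loops
to the integer range syntax introduced in Go 1.22, which this linter
enforces. As an illustration of the mechanical rewrite applied
throughout (a minimal sketch; `n` and `use` are placeholder names, not
code from any one file):

    // Before: classic three-clause counting loop.
    for i := 0; i < n; i++ {
        use(i)
    }

    // After: equivalent range-over-int form; i still runs 0..n-1.
    for i := range n {
        use(i)
    }

    // When the counter itself is unused (e.g. b.N benchmark
    // iterations), the loop variable is dropped entirely.
    for range n {
        use(0)
    }

Both forms are behavior-preserving: each iterates exactly n times.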
---
 .golangci.yml | 1 +
 cmd/telemetrygen/pkg/logs/logs.go | 2 +-
 cmd/telemetrygen/pkg/metrics/metrics.go | 2 +-
 cmd/telemetrygen/pkg/metrics/worker_test.go | 12 +-
 cmd/telemetrygen/pkg/traces/traces.go | 2 +-
 cmd/telemetrygen/pkg/traces/worker.go | 4 +-
 connector/countconnector/connector.go | 30 ++--
 connector/datadogconnector/benchmark_test.go | 2 +-
 connector/datadogconnector/connector.go | 2 +-
 .../exceptionsconnector/connector_logs.go | 8 +-
 .../exceptionsconnector/connector_metrics.go | 10 +-
 .../connector_metrics_test.go | 4 +-
 .../internal/state/pipeline_selector.go | 6 +-
 connector/grafanacloudconnector/connector.go | 2 +-
 .../host_metrics_test.go | 2 +-
 connector/otlpjsonconnector/connector_test.go | 2 +-
 connector/otlpjsonconnector/logs.go | 6 +-
 connector/otlpjsonconnector/metrics.go | 6 +-
 connector/otlpjsonconnector/traces.go | 6 +-
 connector/servicegraphconnector/connector.go | 6 +-
 .../internal/store/store_test.go | 2 +-
 connector/spanmetricsconnector/connector.go | 8 +-
 .../spanmetricsconnector/connector_test.go | 78 +++++------
 .../internal/metrics/metrics.go | 8 +-
 examples/demo/client/main.go | 2 +-
 .../alertmanager_exporter.go | 8 +-
 .../alertmanager_exporter_test.go | 2 +-
 .../logs_exporter_test.go | 2 +-
 .../logsdata_to_logservice.go | 6 +-
 .../logsdata_to_logservice_test.go | 4 +-
 .../metricsdata_to_logservice.go | 20 +--
 .../metricsdata_to_logservice_test.go | 2 +-
 .../tracedata_to_logservice.go | 10 +-
 .../tracedata_to_logservice_test.go | 2 +-
 .../awscloudwatchlogsexporter/config_test.go | 2 +-
 .../awscloudwatchlogsexporter/exporter.go | 6 +-
 .../exporter_test.go | 2 +-
 exporter/awsemfexporter/config_test.go | 2 +-
 exporter/awsemfexporter/datapoint.go | 4 +-
 exporter/awsemfexporter/datapoint_test.go | 16 +--
 exporter/awsemfexporter/emf_exporter.go | 4 +-
 exporter/awsemfexporter/grouped_metric.go | 2 +-
 .../awsemfexporter/grouped_metric_test.go | 14 +-
 exporter/awsemfexporter/metric_translator.go | 4 +-
 .../awsemfexporter/metric_translator_test.go | 20 +--
 .../internal/batch/batch_test.go | 8 +-
 .../internal/batch/encode_marshaler.go | 6 +-
 .../internal/batch/encoder_test.go | 6 +-
 .../internal/compress/compresser_test.go | 8 +-
 .../internal/producer/batcher_bench_test.go | 4 +-
 .../internal/producer/batcher_test.go | 2 +-
 exporter/awss3exporter/body_marshaler.go | 6 +-
 .../internal/upload/partition_test.go | 2 +-
 exporter/awss3exporter/sumo_marshaler.go | 6 +-
 exporter/awsxrayexporter/awsxray.go | 6 +-
 exporter/awsxrayexporter/awsxray_test.go | 2 +-
 .../internal/translator/aws.go | 4 +-
 .../internal/translator/cause.go | 4 +-
 .../internal/translator/http.go | 2 +-
 .../internal/translator/segment.go | 2 +-
 .../internal/translator/span_links.go | 2 +-
 .../internal/translator/writer_pool_test.go | 4 +-
 .../azuredataexplorerexporter/adx_exporter.go | 12 +-
 .../adx_exporter_test.go | 2 +-
 .../azuredataexplorerexporter/e2e_test.go | 6 +-
 .../metricsdata_to_adx.go | 18 +--
 .../tracesdata_to_adx.go | 4 +-
 exporter/azuremonitorexporter/logexporter.go | 6 +-
 .../metric_to_envelopes.go | 8 +-
 .../azuremonitorexporter/metricexporter.go | 6 +-
 .../azuremonitorexporter/trace_to_envelope.go | 2 +-
 .../azuremonitorexporter/traceiteration.go | 6 +-
 .../operationsmanagement/metrics_producer.go | 10 +-
 exporter/carbonexporter/exporter_test.go | 10 +-
 .../carbonexporter/metricdata_to_plaintext.go | 18 +--
 .../metricdata_to_plaintext_test.go | 2 +-
 exporter/cassandraexporter/exporter_logs.go | 6 +-
 exporter/cassandraexporter/exporter_traces.go | 6 +-
 exporter/clickhouseexporter/exporter_logs.go | 6 +-
 .../clickhouseexporter/exporter_logs_test.go | 6 +-
 .../clickhouseexporter/exporter_metrics.go | 6 +-
 .../exporter_metrics_test.go | 8 +-
 .../clickhouseexporter/exporter_traces.go | 10 +-
 .../exporter_traces_test.go | 2 +-
 .../internal/exponential_histogram_metrics.go | 2 +-
 .../internal/gauge_metrics.go | 2 +-
 .../internal/histogram_metrics.go | 2 +-
 .../internal/metrics_model.go | 6 +-
 .../internal/sum_metrics.go | 2 +-
 .../internal/summary_metrics.go | 2 +-
 exporter/coralogixexporter/logs_client.go | 2 +-
 exporter/coralogixexporter/metrics_client.go | 2 +-
 exporter/coralogixexporter/traces_client.go | 2 +-
 exporter/datadogexporter/factory.go | 8 +-
 .../integrationtest/integration_test.go | 4 +-
 .../metrics/sketches/sketches_test.go | 4 +-
 exporter/datadogexporter/logs_exporter.go | 2 +-
 exporter/datadogexporter/metrics_exporter.go | 2 +-
 exporter/datadogexporter/traces_exporter.go | 4 +-
 exporter/datasetexporter/logs_exporter.go | 6 +-
 .../logs_exporter_stress_test.go | 4 +-
 exporter/datasetexporter/traces_exporter.go | 6 +-
 exporter/dorisexporter/exporter_logs.go | 6 +-
 exporter/dorisexporter/exporter_logs_test.go | 2 +-
 exporter/dorisexporter/exporter_metrics.go | 6 +-
 .../dorisexporter/exporter_metrics_test.go | 6 +-
 exporter/dorisexporter/exporter_traces.go | 10 +-
 .../dorisexporter/exporter_traces_test.go | 2 +-
 .../metrics_exponential_histogram.go | 8 +-
 exporter/dorisexporter/metrics_gauge.go | 4 +-
 exporter/dorisexporter/metrics_histogram.go | 8 +-
 exporter/dorisexporter/metrics_sum.go | 4 +-
 exporter/dorisexporter/metrics_summary.go | 4 +-
 exporter/elasticsearchexporter/bulkindexer.go | 2 +-
 exporter/elasticsearchexporter/exporter.go | 36 +++---
 .../elasticsearchexporter/exporter_test.go | 4 +-
 .../integrationtest/exporter_bench_test.go | 6 +-
 .../internal/datapoints/histogram.go | 2 +-
 .../internal/exphistogram/exphistogram.go | 2 +-
 .../internal/objmodel/objmodel.go | 10 +-
 .../internal/serializer/map.go | 2 +-
 .../serializer/otelserializer/logs.go | 2 +-
 .../serializeprofiles/benchmark_test.go | 2 +-
 .../serializeprofiles/transform.go | 12 +-
 .../serializer/otelserializer/traces.go | 2 +-
 .../elasticsearchexporter/metric_grouping.go | 2 +-
 exporter/elasticsearchexporter/model.go | 2 +-
 exporter/elasticsearchexporter/model_test.go | 4 +-
 exporter/fileexporter/buffered_writer_test.go | 4 +-
 .../fileexporter/grouping_file_exporter.go | 8 +-
 .../grouping_file_exporter_test.go | 10 +-
 .../googlecloudpubsubexporter/watermark.go | 28 ++--
 exporter/kafkaexporter/kafka_exporter.go | 2 +-
 exporter/kafkaexporter/marshaler_test.go | 4 +-
 exporter/kafkaexporter/pdata_marshaler.go | 4 +-
 exporter/kafkaexporter/raw_marshaler.go | 6 +-
 exporter/kineticaexporter/common.go | 2 +-
 .../kineticaexporter/exporter_metric_test.go | 8 +-
 exporter/kineticaexporter/metrics_exporter.go | 34 ++---
 .../consistent_hashing.go | 2 +-
 .../loadbalancingexporter/helpers_test.go | 4 +-
 .../loadbalancingexporter/metrics_exporter.go | 26 ++--
 .../metrics_exporter_test.go | 12 +-
 .../loadbalancingexporter/trace_exporter.go | 2 +-
 .../trace_exporter_test.go | 8 +-
 .../internal/testutil/logs.go | 2 +-
 .../logicmonitorexporter/logs_exporter.go | 6 +-
 exporter/logzioexporter/exporter.go | 6 +-
 exporter/logzioexporter/exporter_test.go | 4 +-
 .../logzioexporter/internal/cache/lru_test.go | 4 +-
 .../lokiexporter/internal/tenant/attribute.go | 2 +-
 exporter/mezmoexporter/exporter.go | 6 +-
 exporter/mezmoexporter/exporter_test.go | 6 +-
 exporter/opencensusexporter/opencensus.go | 12 +-
 .../opencensusexporter/opencensus_test.go | 4 +-
 exporter/opensearchexporter/encoder.go | 4 +-
 .../internal/objmodel/objmodel.go | 8 +-
 .../opensearchexporter/log_bulk_indexer.go | 6 +-
 .../opensearchexporter/trace_bulk_indexer.go | 6 +-
 .../internal/arrow/bestofn.go | 8 +-
 .../internal/arrow/exporter_test.go | 12 +-
 exporter/otelarrowexporter/metadata_test.go | 6 +-
 exporter/prometheusexporter/accumulator.go | 14 +-
 .../prometheusexporter/accumulator_test.go | 20 +--
 exporter/prometheusexporter/collector.go | 6 +-
 exporter/prometheusexporter/prometheus.go | 2 +-
 .../prometheusexporter/prometheus_test.go | 2 +-
 .../prometheusremotewriteexporter/exporter.go | 2 +-
 .../exporter_concurrency_test.go | 4 +-
 .../exporter_test.go | 12 +-
 .../helper_test.go | 8 +-
 .../testutil_test.go | 4 +-
 exporter/prometheusremotewriteexporter/wal.go | 2 +-
 .../prometheusremotewriteexporter/wal_test.go | 2 +-
 exporter/sapmexporter/exporter_test.go | 4 +-
 exporter/sentryexporter/sentry_exporter.go | 8 +-
 exporter/signalfxexporter/eventclient.go | 4 +-
 exporter/signalfxexporter/exporter_test.go | 16 +--
 exporter/signalfxexporter/factory_test.go | 4 +-
 .../internal/apm/tracetracker/tracker.go | 2 +-
 .../internal/dimensions/dimclient_test.go | 2 +-
 .../internal/translation/converter.go | 6 +-
 .../internal/translation/converter_test.go | 4 +-
 .../translation/logdata_to_signalfxv2.go | 2 +-
 .../translation/logdata_to_signalfxv2_test.go | 2 +-
 .../internal/utils/histogram_utils.go | 6 +-
 .../internal/utils/histogram_utils_test.go | 6 +-
 exporter/splunkhecexporter/batchperscope.go | 10 +-
 exporter/splunkhecexporter/client.go | 8 +-
 exporter/splunkhecexporter/client_test.go | 26 ++--
 .../internal/integrationtestutils/splunk.go | 2 +-
 .../splunkhecexporter/metricdata_to_splunk.go | 12 +-
 .../splunkhecexporter/tracedata_to_splunk.go | 4 +-
 exporter/stefexporter/exporter_test.go | 2 +-
 exporter/sumologicexporter/exporter.go | 4 +-
 exporter/sumologicexporter/exporter_test.go | 4 +-
 exporter/sumologicexporter/fields_test.go | 2 +-
 exporter/sumologicexporter/otlp.go | 20 +--
 .../sumologicexporter/prometheus_formatter.go | 12 +-
 .../prometheus_formatter_test.go | 2 +-
 exporter/sumologicexporter/sender.go | 10 +-
 exporter/sumologicexporter/sender_test.go | 4 +-
 exporter/syslogexporter/exporter.go | 12 +-
 .../logs_exporter_test.go | 2 +-
 .../logsdata_to_logservice.go | 6 +-
 .../logsdata_to_logservice_test.go | 4 +-
 extension/ackextension/inmemory_test.go | 48 +++----
 .../jsonlogencodingextension/extension.go | 12 +-
 .../otlpencodingextension/extension_test.go | 8 +-
 .../encoding/textencodingextension/text.go | 6 +-
 .../jaegerremotesampling/extension_test.go | 2 +-
 .../source/filesource/filesource_test.go | 4 +-
 .../remote_strategy_cache_test.go | 4 +-
 .../ecsobserver/internal/ecsmock/service.go | 12 +-
 .../internal/ecsmock/service_test.go | 4 +-
 extension/storage/dbstorage/extension_test.go | 10 +-
 extension/storage/filestorage/client_test.go | 42 +++---
 .../storage/filestorage/extension_test.go | 15 +--
 .../redisstorageextension/extension_test.go | 10 +-
 internal/aws/containerinsight/utils_test.go | 4 +-
 internal/aws/cwlogs/pusher_test.go | 4 +-
 .../aws/metrics/metric_calculator_test.go | 6 +-
 .../coreinternal/aggregateutil/aggregate.go | 24 ++--
 .../coreinternal/goldendataset/metrics_gen.go | 20 +--
 .../goldendataset/span_generator.go | 6 +-
 .../goldendataset/traces_generator.go | 2 +-
 .../metricstestutil/metric_diff.go | 18 +--
 internal/coreinternal/parseutils/uri_test.go | 6 +-
 internal/exp/metrics/metrics.go | 14 +-
 internal/exp/metrics/metrics_test.go | 8 +-
 internal/filter/filterexpr/matcher.go | 10 +-
 internal/filter/filterexpr/matcher_test.go | 4 +-
 internal/filter/filterlog/filterlog_test.go | 2 +-
 .../filtermatcher/attributematcher_test.go | 2 +-
 internal/filter/filterottl/functions.go | 8 +-
 internal/filter/filterspan/filterspan_test.go | 2 +-
 .../k8snode/metadata_test.go | 2 +-
 .../otelarrow/admission2/notification_test.go | 2 +-
 .../otelarrow/compression/zstd/mru_test.go | 6 +-
 internal/otelarrow/netstats/netstats_test.go | 6 +-
 internal/otelarrow/test/e2e_test.go | 2 +-
 internal/pdatautil/logs.go | 24 ++--
 internal/pdatautil/logs_test.go | 6 +-
 internal/sqlquery/db_client_test.go | 2 +-
 .../batchperresourceattr.go | 6 +-
 .../batchperresourceattr_test.go | 12 +-
 pkg/batchpersignal/batchpersignal.go | 18 +--
 pkg/golden/normalize_timestamps.go | 10 +-
 pkg/golden/sort_metrics.go | 26 ++--
 pkg/ottl/compare_test.go | 26 ++--
 pkg/ottl/contexts/internal/logging/logging.go | 22 ++--
 pkg/ottl/contexts/internal/value.go | 8 +-
 pkg/ottl/e2e/e2e_test.go | 2 +-
 pkg/ottl/expression.go | 2 +-
 pkg/ottl/functions.go | 6 +-
 pkg/ottl/functions_test.go | 2 +-
 pkg/ottl/ottlfuncs/func_flatten.go | 4 +-
 pkg/ottl/ottlfuncs/func_len_test.go | 36 +++---
 pkg/ottl/ottlfuncs/func_parse_json_test.go | 2 +-
 pkg/ottl/ottlfuncs/func_sort.go | 4 +-
 pkg/pdatatest/plogtest/logs.go | 20 +--
 pkg/pdatatest/plogtest/options.go | 26 ++--
 pkg/pdatatest/pmetrictest/metrics.go | 54 ++++----
 pkg/pdatatest/pmetrictest/options.go | 122 +++++++++---------
 pkg/pdatatest/pprofiletest/options.go | 26 ++--
 pkg/pdatatest/pprofiletest/profiles.go | 68 +++++-----
 pkg/pdatatest/ptracetest/options.go | 50 +++----
 pkg/pdatatest/ptracetest/traces.go | 32 ++---
 pkg/pdatautil/hash.go | 2 +-
 pkg/pdatautil/hash_test.go | 10 +-
 .../resource_to_telemetry.go | 14 +-
 .../resource_to_telemetry_test.go | 4 +-
 pkg/sampling/encoding_test.go | 2 +-
 pkg/stanza/adapter/benchmark_test.go | 4 +-
 pkg/stanza/adapter/converter_test.go | 22 ++--
 pkg/stanza/adapter/frompdataconverter.go | 8 +-
 pkg/stanza/adapter/frompdataconverter_test.go | 4 +-
 pkg/stanza/adapter/integration_test.go | 4 +-
 pkg/stanza/adapter/receiver_test.go | 8 +-
 pkg/stanza/fileconsumer/attrs/attrs_test.go | 2 +-
 pkg/stanza/fileconsumer/benchmark_test.go | 8 +-
 pkg/stanza/fileconsumer/file_test.go | 46 +++----
 .../internal/checkpoint/checkpoint.go | 2 +-
 .../fileconsumer/internal/emittest/sink.go | 6 +-
 .../internal/emittest/sink_test.go | 12 +-
 .../internal/fingerprint/fingerprint.go | 2 +-
 .../internal/reader/reader_test.go | 4 +-
 .../fileconsumer/internal/tracker/tracker.go | 10 +-
 .../internal/tracker/tracker_test.go | 14 +-
 .../matcher/internal/filter/filter_test.go | 2 +-
 pkg/stanza/fileconsumer/rotation_test.go | 24 ++--
 pkg/stanza/operator/helper/emitter_test.go | 2 +-
 pkg/stanza/operator/helper/expr_string.go | 2 +-
 .../operator/helper/ip_resolver_test.go | 2 +-
 .../operator/input/generate/input_test.go | 2 +-
 pkg/stanza/operator/input/tcp/input_test.go | 2 +-
 pkg/stanza/operator/input/udp/input.go | 4 +-
 pkg/stanza/operator/input/udp/input_test.go | 2 +-
 .../operator/parser/json/parser_test.go | 2 +-
 .../operator/parser/regex/parser_test.go | 18 +--
 pkg/stanza/operator/parser/uri/parser_test.go | 2 +-
 .../transformer/recombine/transformer_test.go | 8 +-
 pkg/stanza/pipeline/config_test.go | 2 +-
 pkg/stanza/split/splittest/splittest.go | 2 +-
 pkg/stanza/testutil/mocks.go | 2 +-
 pkg/translator/azure/resourcelogs_to_logs.go | 2 +-
 .../azurelogs/resourcelogs_to_logs.go | 2 +-
 .../jaeger/jaegerproto_to_traces_test.go | 6 +-
 .../jaeger/jaegerthrift_to_traces_test.go | 2 +-
 .../jaeger/traces_to_jaegerproto.go | 10 +-
 .../jaeger/traces_to_jaegerproto_test.go | 2 +-
 pkg/translator/loki/convert.go | 2 +-
 pkg/translator/loki/encode.go | 2 +-
 pkg/translator/loki/logs_to_loki.go | 6 +-
 pkg/translator/loki/logs_to_loki_test.go | 14 +-
 pkg/translator/opencensus/metrics_to_oc.go | 24 ++--
 pkg/translator/opencensus/oc_to_metrics.go | 2 +-
 .../opencensus/oc_to_metrics_test.go | 6 +-
 .../opencensus/oc_to_resource_test.go | 4 +-
 .../opencensus/oc_to_traces_test.go | 6 +-
 .../opencensus/resource_to_oc_test.go | 4 +-
 pkg/translator/opencensus/traces_to_oc.go | 8 +-
 .../prometheusremotewrite/helper.go | 18 +--
 .../prometheusremotewrite/helper_test.go | 4 +-
 .../prometheusremotewrite/histograms.go | 8 +-
 .../prometheusremotewrite/histograms_test.go | 4 +-
 .../prometheusremotewrite/metrics_to_prw.go | 6 +-
 .../metrics_to_prw_test.go | 4 +-
 .../metrics_to_prw_v2.go | 6 +-
 .../number_data_points.go | 4 +-
 .../number_data_points_v2.go | 6 +-
 .../otlp_to_openmetrics_metadata.go | 10 +-
 .../otlp_to_openmetrics_metadata_test.go | 2 +-
 pkg/translator/signalfx/from_metrics.go | 20 +--
 pkg/translator/signalfx/from_metrics_test.go | 2 +-
 pkg/translator/signalfx/to_metrics_test.go | 2 +-
 .../skywalking/skywalkingproto_to_traces.go | 4 +-
 .../skywalkingproto_to_traces_test.go | 4 +-
 .../zipkinthriftconverter/deserialize.go | 2 +-
 pkg/translator/zipkin/zipkinv1/thrift_test.go | 6 +-
 .../zipkin/zipkinv2/from_translator.go | 12 +-
 .../zipkin/zipkinv2/from_translator_test.go | 12 +-
 .../zipkin/zipkinv2/to_translator.go | 2 +-
 .../attributesprocessor/attributes_log.go | 6 +-
 .../attributes_log_test.go | 2 +-
 .../attributesprocessor/attributes_metric.go | 16 +--
 .../attributes_metric_test.go | 2 +-
 .../attributesprocessor/attributes_trace.go | 6 +-
 .../attributes_trace_test.go | 2 +-
 .../processor_test.go | 10 +-
 .../internal/data/add.go | 2 +-
 .../internal/data/datatest/equal.go | 4 +-
 .../internal/data/expo/expo_test.go | 2 +-
 .../internal/data/expo/expotest/bins.go | 2 +-
 .../internal/data/expo/merge.go | 2 +-
 .../internal/data/expo/scale.go | 2 +-
 .../internal/data/expo/scale_test.go | 2 +-
 .../internal/data/expo/zero_test.go | 2 +-
 .../internal/putil/pslice/pslice.go | 4 +-
 processor/deltatorateprocessor/processor.go | 8 +-
 .../deltatorateprocessor/processor_test.go | 6 +-
 processor/filterprocessor/expr_test.go | 14 +-
 processor/filterprocessor/metrics_test.go | 4 +-
 .../geoipprocessor/geoip_processor_logs.go | 6 +-
 .../geoipprocessor/geoip_processor_metrics.go | 16 +--
 .../geoipprocessor/geoip_processor_traces.go | 6 +-
 .../groupbyattrsprocessor/attribute_groups.go | 12 +-
 .../attribute_groups_test.go | 8 +-
 processor/groupbyattrsprocessor/processor.go | 30 ++---
 .../groupbyattrsprocessor/processor_test.go | 90 ++++++-------
 processor/groupbytraceprocessor/event_test.go | 4 +-
 .../groupbytraceprocessor/processor_test.go | 2 +-
 .../groupbytraceprocessor/storage_memory.go | 2 +-
 processor/intervalprocessor/processor.go | 2 +-
 processor/k8sattributesprocessor/processor.go | 8 +-
 .../k8sattributesprocessor/processor_test.go | 2 +-
 processor/logdedupprocessor/counter_test.go | 2 +-
 processor/logdedupprocessor/processor.go | 4 +-
 .../logstransformprocessor/processor_test.go | 2 +-
 .../metricsgenerationprocessor/processor.go | 2 +-
 .../processor_test.go | 4 +-
 processor/metricsgenerationprocessor/utils.go | 18 +--
 .../metricstransformprocessor/factory.go | 2 +-
 .../metrics_testcase_builder_test.go | 4 +-
 .../metrics_transform_processor_otlp.go | 26 ++--
 .../operation_scale_value.go | 6 +-
 .../operation_toggle_scalar_datatype.go | 2 +-
 .../fnvhasher_test.go | 2 +-
 .../logsprocessor_test.go | 2 +-
 .../tracesprocessor_test.go | 12 +-
 processor/redactionprocessor/processor.go | 28 ++--
 .../redactionprocessor/processor_test.go | 4 +-
 .../remotetapprocessor/processor_test.go | 6 +-
 .../internal/resourcedetection_test.go | 2 +-
 .../resourcedetection_processor.go | 8 +-
 .../resourcedetection_processor_test.go | 8 +-
 .../resourceprocessor/resource_processor.go | 6 +-
 processor/routingprocessor/logs.go | 4 +-
 processor/routingprocessor/metrics.go | 4 +-
 processor/routingprocessor/metrics_test.go | 2 +-
 processor/routingprocessor/traces.go | 4 +-
 .../internal/changelist/changelist.go | 2 +-
 .../internal/fixture/parallel.go | 2 +-
 .../transformer/attributes_operators.go | 10 +-
 .../transformer/conditional_attributes.go | 10 +-
 .../multi_conditional_attributes.go | 2 +-
 .../internal/transformer/signal_name.go | 2 +-
 .../internal/translation/translation.go | 10 +-
 .../translation/translation_helpers_test.go | 8 +-
 .../translation/translation_race_test.go | 18 +--
 .../internal/translation/translation_test.go | 44 +++----
 .../internal/translation/version_test.go | 2 +-
 processor/spanprocessor/span.go | 6 +-
 .../aggregate_attributes_processor.go | 26 ++--
 processor/sumologicprocessor/attributes.go | 10 +-
 .../cloud_namespace_processor.go | 6 +-
 .../log_fields_conversion_processor.go | 6 +-
 .../sumologicprocessor/nesting_processor.go | 24 ++--
 .../sumologicprocessor/processor_test.go | 6 +-
 .../translate_attributes_processor.go | 4 +-
 .../translate_attributes_processor_test.go | 2 +-
 .../translate_docker_metrics_processor.go | 6 +-
 .../translate_telegraf_metrics_processor.go | 6 +-
 .../internal/idbatcher/id_batcher.go | 2 +-
 .../internal/idbatcher/id_batcher_test.go | 6 +-
 .../internal/sampling/composite.go | 2 +-
 .../internal/sampling/composite_test.go | 16 +--
 .../internal/sampling/ottl.go | 8 +-
 .../internal/sampling/probabilistic_test.go | 2 +-
 .../sampling/span_count_sampler_test.go | 2 +-
 .../sampling/string_tag_filter_test.go | 4 +-
 .../internal/sampling/util.go | 18 +--
 .../internal/sampling/util_test.go | 4 +-
 processor/tailsamplingprocessor/processor.go | 6 +-
 .../processor_benchmarks_test.go | 4 +-
 .../tailsamplingprocessor/processor_test.go | 14 +-
 .../internal/common/logs.go | 6 +-
 .../internal/common/metrics.go | 20 +--
 .../internal/common/processor.go | 18 +--
 .../internal/common/traces.go | 14 +-
 ...nvert_exponential_hist_to_explicit_hist.go | 6 +-
 ..._exponential_hist_to_explicit_hist_test.go | 4 +-
 .../func_convert_summary_count_val_to_sum.go | 2 +-
 .../func_convert_summary_sum_val_to_sum.go | 2 +-
 .../metrics/func_extract_count_metric.go | 6 +-
 .../metrics/func_extract_sum_metric.go | 6 +-
 .../internal/metrics/func_scale.go | 8 +-
 .../internal/metrics/func_scale_test.go | 2 +-
 .../internal/traces/processor_test.go | 8 +-
 .../transformprocessor/processor_test.go | 4 +-
 .../aerospikereceiver/integration_test.go | 2 +-
 receiver/awscloudwatchreceiver/logs_test.go | 2 +-
 .../k8sapiserver/k8sapiserver_test.go | 4 +-
 .../awsfirehosereceiver/benchmark_test.go | 8 +-
 .../unmarshaler/cwlog/unmarshaler_test.go | 2 +-
 .../cwmetricstream/unmarshaler_test.go | 4 +-
 .../otlpmetricstream/unmarshaler_test.go | 4 +-
 receiver/awsfirehosereceiver/logs_receiver.go | 2 +-
 .../awsfirehosereceiver/metrics_receiver.go | 2 +-
 receiver/awss3receiver/notifications.go | 2 +-
 receiver/awss3receiver/notifications_test.go | 2 +-
 .../internal/udppoller/poller.go | 2 +-
 .../internal/client/plaintext_client.go | 2 +-
 .../protocol/regex_parser_test.go | 4 +-
 receiver/cloudfoundryreceiver/receiver.go | 4 +-
 receiver/collectdreceiver/receiver_test.go | 2 +-
 receiver/couchdbreceiver/metrics.go | 2 +-
 .../service_check_translator_test.go | 2 +-
 .../internal/translator/sketches_test.go | 6 +-
 .../translator/stats_translator_test.go | 8 +-
 .../internal/translator/testutil.go | 4 +-
 .../internal/translator/traces_translator.go | 2 +-
 .../translator/traces_translator_test.go | 6 +-
 .../dockerstatsreceiver/integration_test.go | 2 +-
 receiver/filelogreceiver/filelog_test.go | 2 +-
 receiver/filelogreceiver/storage_test.go | 6 +-
 receiver/fluentforwardreceiver/conversion.go | 2 +-
 .../fluentforwardreceiver/conversion_test.go | 6 +-
 .../fluentforwardreceiver/receiver_test.go | 4 +-
 .../internal/scraper/githubscraper/helpers.go | 2 +-
 .../trace_event_handling_test.go | 2 +-
 .../internal/filter/testhelpers_test.go | 2 +-
 .../internal/metadata/metricsbuilder_test.go | 2 +-
 .../statsreader/timestampsgenerator_test.go | 2 +-
 .../hostmetrics_receiver_test.go | 6 +-
 .../scraper/diskscraper/disk_scraper_test.go | 2 +-
 .../filesystem_scraper_test.go | 4 +-
 .../scraper/loadscraper/load_scraper_test.go | 2 +-
 .../loadscraper/load_scraper_windows_test.go | 6 +-
 .../memoryscraper/memory_scraper_test.go | 2 +-
 .../processes_scraper_test.go | 6 +-
 .../scraper/processscraper/process_scraper.go | 2 +-
 .../processscraper/process_scraper_test.go | 18 +--
 .../hostmetricsreceiver/internal/testutils.go | 6 +-
 receiver/jaegerreceiver/jaeger_agent_test.go | 2 +-
 .../internal/metadata/entities_test.go | 2 +-
 .../k8sclusterreceiver/mock_resources_test.go | 8 +-
 .../unstructured_to_logdata_test.go | 2 +-
 .../broker_scraper_test.go | 2 +-
 .../topic_scraper_test.go | 2 +-
 receiver/kafkareceiver/header_extraction.go | 6 +-
 .../kafkareceiver/header_extraction_test.go | 6 +-
 .../internal/kubelet/accumulator_test.go | 2 +-
 .../internal/kubelet/metrics_test.go | 18 +--
 .../internal/parser/parser.go | 2 +-
 receiver/mongodbatlasreceiver/alerts_test.go | 6 +-
 receiver/mysqlreceiver/scraper.go | 12 +-
 receiver/namedpipereceiver/namedpipe_test.go | 2 +-
 .../internal/ocmetrics/opencensus_test.go | 6 +-
 .../internal/octrace/observability_test.go | 4 +-
 .../internal/octrace/opencensus_test.go | 6 +-
 receiver/oracledbreceiver/scraper_test.go | 2 +-
 .../internal/arrow/arrow_test.go | 22 ++--
 .../internal/logs/otlp_test.go | 2 +-
 .../internal/metrics/otlp_test.go | 2 +-
 .../internal/trace/otlp_test.go | 2 +-
 receiver/otelarrowreceiver/otelarrow_test.go | 10 +-
 receiver/otlpjsonfilereceiver/file.go | 18 +--
 .../podmanreceiver/record_metrics_test.go | 2 +-
 .../internal/metricfamily.go | 10 +-
 .../internal/metrics_adjuster.go | 16 +--
 .../internal/metrics_adjuster_test.go | 6 +-
 .../internal/metricsutil_test.go | 4 +-
 .../internal/staleness_end_to_end_test.go | 2 +-
 .../internal/starttimemetricadjuster.go | 20 +--
 .../internal/starttimemetricadjuster_test.go | 26 ++--
 .../internal/transaction.go | 2 +-
 .../internal/transaction_test.go | 22 ++--
 .../metrics_receiver_helper_test.go | 28 ++--
 .../metrics_receiver_non_numerical_test.go | 4 +-
 ...trics_receiver_scrape_config_files_test.go | 2 +-
 .../metrics_receiver_test.go | 10 +-
 .../prometheusremotewritereceiver/receiver.go | 2 +-
 receiver/receivercreator/consumer.go | 6 +-
 receiver/redisreceiver/redis_scraper.go | 2 +-
 receiver/signalfxreceiver/receiver.go | 2 +-
 receiver/signalfxreceiver/receiver_test.go | 6 +-
 receiver/solacereceiver/receiver_test.go | 2 +-
 receiver/solacereceiver/unmarshaller_test.go | 4 +-
 receiver/splunkhecreceiver/receiver_test.go | 8 +-
 .../splunk_to_logdata_test.go | 2 +-
 receiver/sshcheckreceiver/scraper_test.go | 2 +-
 .../internal/protocol/metric_translator.go | 2 +-
 .../internal/protocol/statsd_parser_test.go | 6 +-
 receiver/syslogreceiver/syslog_test.go | 4 +-
 receiver/tcplogreceiver/tcp_test.go | 4 +-
 receiver/udplogreceiver/udp_test.go | 6 +-
 receiver/wavefrontreceiver/receiver_test.go | 6 +-
 .../receiver_windows_test.go | 8 +-
 receiver/zipkinreceiver/proto_parse_test.go | 4 +-
 .../metrics/metrics_test_harness.go | 2 +-
 testbed/datasenders/fluent.go | 6 +-
 testbed/datasenders/k8s.go | 6 +-
 testbed/datasenders/stanza.go | 6 +-
 testbed/datasenders/syslog.go | 6 +-
 testbed/datasenders/tcpudp.go | 6 +-
 .../traces_exporter.go | 6 +-
 557 files changed, 2119 insertions(+), 2119 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index a0efd3c2fd29f..557fbee1d2397 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -210,6 +210,7 @@ linters:
     - goimports
     - gosec
     - govet
+    - intrange
     - misspell
     - nolintlint
    - predeclared
diff --git a/cmd/telemetrygen/pkg/logs/logs.go b/cmd/telemetrygen/pkg/logs/logs.go
index b96ffc41948f2..b61c42fcfc31e 100644
--- a/cmd/telemetrygen/pkg/logs/logs.go
+++ b/cmd/telemetrygen/pkg/logs/logs.go
@@ -68,7 +68,7 @@ func run(c *Config, expF exporterFunc, logger *zap.Logger) error {
 		return err
 	}
 
-	for i := 0; i < c.WorkerCount; i++ {
+	for i := range c.WorkerCount {
 		wg.Add(1)
 		w := worker{
 			numLogs: c.NumLogs,
diff --git a/cmd/telemetrygen/pkg/metrics/metrics.go b/cmd/telemetrygen/pkg/metrics/metrics.go
index bfbc9ab529e96..4aded37063d15 100644
--- a/cmd/telemetrygen/pkg/metrics/metrics.go
+++ b/cmd/telemetrygen/pkg/metrics/metrics.go
@@ -63,7 +63,7 @@ func run(c *Config, expF exporterFunc, logger *zap.Logger) error {
 	running := &atomic.Bool{}
 	running.Store(true)
 
-	for i := 0; i < c.WorkerCount; i++ {
+	for i := range c.WorkerCount {
 		wg.Add(1)
 		w := worker{
 			numMetrics: c.NumMetrics,
diff --git a/cmd/telemetrygen/pkg/metrics/worker_test.go b/cmd/telemetrygen/pkg/metrics/worker_test.go
index fe83abdcc81f1..73d1e03ab743a 100644
--- a/cmd/telemetrygen/pkg/metrics/worker_test.go
+++ b/cmd/telemetrygen/pkg/metrics/worker_test.go
@@ -139,7 +139,7 @@ func TestSumNoTelemetryAttrs(t *testing.T) {
 	require.Len(t, m.rms, qty)
 
 	rms := m.rms
-	for i := 0; i < qty; i++ {
+	for i := range qty {
 		ms := rms[i].ScopeMetrics[0].Metrics[0]
 		assert.Equal(t, "test", ms.Name)
 		// @note update when telemetrygen allow other metric types
@@ -167,7 +167,7 @@ func TestGaugeNoTelemetryAttrs(t *testing.T) {
 	require.Len(t, m.rms, qty)
 
 	rms := m.rms
-	for i := 0; i < qty; i++ {
+	for i := range qty {
 		ms := rms[i].ScopeMetrics[0].Metrics[0]
 		assert.Equal(t, "test", ms.Name)
 		// @note update when telemetrygen allow other metric types
@@ -195,7 +195,7 @@ func TestSumSingleTelemetryAttr(t *testing.T) {
 	require.Len(t, m.rms, qty)
 
 	rms := m.rms
-	for i := 0; i < qty; i++ {
+	for i := range qty {
 		ms := rms[i].ScopeMetrics[0].Metrics[0]
 		assert.Equal(t, "test", ms.Name)
 		// @note update when telemetrygen allow other metric types
@@ -225,7 +225,7 @@ func TestGaugeSingleTelemetryAttr(t *testing.T) {
 	require.Len(t, m.rms, qty)
 
 	rms := m.rms
-	for i := 0; i < qty; i++ {
+	for i := range qty {
 		ms := rms[i].ScopeMetrics[0].Metrics[0]
 		assert.Equal(t, "test", ms.Name)
 		// @note update when telemetrygen allow other metric types
@@ -256,7 +256,7 @@ func TestSumMultipleTelemetryAttr(t *testing.T) {
 	rms := m.rms
 
 	var actualValue attribute.Value
-	for i := 0; i < qty; i++ {
+	for i := range qty {
 		ms := rms[i].ScopeMetrics[0].Metrics[0]
 		// @note update when telemetrygen allow other metric types
 		attr := ms.Data.(metricdata.Sum[int64]).DataPoints[0].Attributes
@@ -288,7 +288,7 @@ func TestGaugeMultipleTelemetryAttr(t *testing.T) {
 	rms := m.rms
 
 	var actualValue attribute.Value
-	for i := 0; i < qty; i++ {
+	for i := range qty {
 		ms := rms[i].ScopeMetrics[0].Metrics[0]
 		// @note update when telemetrygen allow other metric types
 		attr := ms.Data.(metricdata.Gauge[int64]).DataPoints[0].Attributes
diff --git a/cmd/telemetrygen/pkg/traces/traces.go b/cmd/telemetrygen/pkg/traces/traces.go
index 1a281e5faa53d..284b054bf6834 100644
--- a/cmd/telemetrygen/pkg/traces/traces.go
+++ b/cmd/telemetrygen/pkg/traces/traces.go
@@ -140,7 +140,7 @@ func run(c *Config, logger *zap.Logger) error {
 
 	telemetryAttributes := c.GetTelemetryAttributes()
 
-	for i := 0; i < c.WorkerCount; i++ {
+	for i := range c.WorkerCount {
 		wg.Add(1)
 		w := worker{
 			numTraces: c.NumTraces,
diff --git a/cmd/telemetrygen/pkg/traces/worker.go b/cmd/telemetrygen/pkg/traces/worker.go
index c3cc86d747ff0..42270c04150a3 100644
--- a/cmd/telemetrygen/pkg/traces/worker.go
+++ b/cmd/telemetrygen/pkg/traces/worker.go
@@ -62,7 +62,7 @@ func (w worker) simulateTraces(telemetryAttributes []attribute.KeyValue) {
 		trace.WithTimestamp(spanStart),
 	)
 	sp.SetAttributes(telemetryAttributes...)
-	for j := 0; j < w.loadSize; j++ {
+	for j := range w.loadSize {
 		sp.SetAttributes(attribute.String(fmt.Sprintf("load-%v", j), string(make([]byte, charactersPerMB))))
 	}
 
@@ -77,7 +77,7 @@ func (w worker) simulateTraces(telemetryAttributes []attribute.KeyValue) {
 	}
 
 	var endTimestamp trace.SpanEventOption
-	for j := 0; j < w.numChildSpans; j++ {
+	for j := range w.numChildSpans {
 		if err := limiter.Wait(context.Background()); err != nil {
 			w.logger.Fatal("limiter waited failed, retry", zap.Error(err))
 		}
diff --git a/connector/countconnector/connector.go b/connector/countconnector/connector.go
index 3a6ef3f443f0e..03bb33bdf2cc1 100644
--- a/connector/countconnector/connector.go
+++ b/connector/countconnector/connector.go
@@ -45,20 +45,20 @@ func (c *count) ConsumeTraces(ctx context.Context, td ptrace.Traces) error {
 	var multiError error
 	countMetrics := pmetric.NewMetrics()
 	countMetrics.ResourceMetrics().EnsureCapacity(td.ResourceSpans().Len())
-	for i := 0; i < td.ResourceSpans().Len(); i++ {
+	for i := range td.ResourceSpans().Len() {
 		resourceSpan := td.ResourceSpans().At(i)
 		spansCounter := newCounter[ottlspan.TransformContext](c.spansMetricDefs)
 		spanEventsCounter := newCounter[ottlspanevent.TransformContext](c.spanEventsMetricDefs)
 
-		for j := 0; j < resourceSpan.ScopeSpans().Len(); j++ {
+		for j := range resourceSpan.ScopeSpans().Len() {
 			scopeSpan := resourceSpan.ScopeSpans().At(j)
 
-			for k := 0; k < scopeSpan.Spans().Len(); k++ {
+			for k := range scopeSpan.Spans().Len() {
 				span := scopeSpan.Spans().At(k)
 				sCtx := ottlspan.NewTransformContext(span, scopeSpan.Scope(), resourceSpan.Resource(), scopeSpan, resourceSpan)
 				multiError = errors.Join(multiError, spansCounter.update(ctx, span.Attributes(), sCtx))
 
-				for l := 0; l < span.Events().Len(); l++ {
+				for l := range span.Events().Len() {
 					event := span.Events().At(l)
 					eCtx := ottlspanevent.NewTransformContext(event, span, scopeSpan.Scope(), resourceSpan.Resource(), scopeSpan, resourceSpan)
 					multiError = errors.Join(multiError, spanEventsCounter.update(ctx, event.Attributes(), eCtx))
@@ -90,15 +90,15 @@ func (c *count) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
 	var multiError error
 	countMetrics := pmetric.NewMetrics()
 	countMetrics.ResourceMetrics().EnsureCapacity(md.ResourceMetrics().Len())
-	for i := 0; i < md.ResourceMetrics().Len(); i++ {
+	for i := range md.ResourceMetrics().Len() {
 		resourceMetric := md.ResourceMetrics().At(i)
 		metricsCounter := newCounter[ottlmetric.TransformContext](c.metricsMetricDefs)
 		dataPointsCounter := newCounter[ottldatapoint.TransformContext](c.dataPointsMetricDefs)
 
-		for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ {
+		for j := range resourceMetric.ScopeMetrics().Len() {
 			scopeMetrics := resourceMetric.ScopeMetrics().At(j)
 
-			for k := 0; k < scopeMetrics.Metrics().Len(); k++ {
+			for k := range scopeMetrics.Metrics().Len() {
 				metric := scopeMetrics.Metrics().At(k)
 				mCtx := ottlmetric.NewTransformContext(metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
 				multiError = errors.Join(multiError, metricsCounter.update(ctx, pcommon.NewMap(), mCtx))
@@ -107,31 +107,31 @@
 				switch metric.Type() {
 				case pmetric.MetricTypeGauge:
 					dps := metric.Gauge().DataPoints()
-					for i := 0; i < dps.Len(); i++ {
+					for i := range dps.Len() {
 						dCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
 						multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dps.At(i).Attributes(), dCtx))
 					}
 				case pmetric.MetricTypeSum:
 					dps := metric.Sum().DataPoints()
-					for i := 0; i < dps.Len(); i++ {
+					for i := range dps.Len() {
 						dCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
 						multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dps.At(i).Attributes(), dCtx))
 					}
 				case pmetric.MetricTypeSummary:
 					dps := metric.Summary().DataPoints()
-					for i := 0; i < dps.Len(); i++ {
+					for i := range dps.Len() {
 						dCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
 						multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dps.At(i).Attributes(), dCtx))
 					}
 				case pmetric.MetricTypeHistogram:
 					dps := metric.Histogram().DataPoints()
-					for i := 0; i < dps.Len(); i++ {
+					for i := range dps.Len() {
 						dCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
 						multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dps.At(i).Attributes(), dCtx))
 					}
 				case pmetric.MetricTypeExponentialHistogram:
 					dps := metric.ExponentialHistogram().DataPoints()
-					for i := 0; i < dps.Len(); i++ {
+					for i := range dps.Len() {
 						dCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
 						multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dps.At(i).Attributes(), dCtx))
 					}
@@ -165,14 +165,14 @@ func (c *count) ConsumeLogs(ctx context.Context, ld plog.Logs) error {
 	var multiError error
 	countMetrics := pmetric.NewMetrics()
 	countMetrics.ResourceMetrics().EnsureCapacity(ld.ResourceLogs().Len())
-	for i := 0; i < ld.ResourceLogs().Len(); i++ {
+	for i := range ld.ResourceLogs().Len() {
 		resourceLog := ld.ResourceLogs().At(i)
 		counter := newCounter[ottllog.TransformContext](c.logsMetricDefs)
 
-		for j := 0; j < resourceLog.ScopeLogs().Len(); j++ {
+		for j := range resourceLog.ScopeLogs().Len() {
 			scopeLogs := resourceLog.ScopeLogs().At(j)
 
-			for k := 0; k < scopeLogs.LogRecords().Len(); k++ {
+			for k := range scopeLogs.LogRecords().Len() {
 				logRecord := scopeLogs.LogRecords().At(k)
 				lCtx := ottllog.NewTransformContext(logRecord, scopeLogs.Scope(), resourceLog.Resource(), scopeLogs, resourceLog)
diff --git a/connector/datadogconnector/benchmark_test.go b/connector/datadogconnector/benchmark_test.go
index efdeb0a4290d2..2d317a65a5e44 100644
--- a/connector/datadogconnector/benchmark_test.go
+++ b/connector/datadogconnector/benchmark_test.go
@@ -79,7 +79,7 @@ func benchmarkPeerTags(b *testing.B) {
 
 	b.ResetTimer()
 
-	for n := 0; n < b.N; n++ {
+	for range b.N {
 		err = tconn.ConsumeTraces(context.Background(), genTrace())
 		assert.NoError(b, err)
 		for {
diff --git a/connector/datadogconnector/connector.go b/connector/datadogconnector/connector.go
index 3a1aa2479efa4..9755841583249 100644
--- a/connector/datadogconnector/connector.go
+++ b/connector/datadogconnector/connector.go
@@ -176,7 +176,7 @@ func (c *traceToMetricConnector) addToCache(containerID string, key string) {
 }
 
 func (c *traceToMetricConnector) populateContainerTagsCache(traces ptrace.Traces) {
-	for i := 0; i < traces.ResourceSpans().Len(); i++ {
+	for i := range traces.ResourceSpans().Len() {
 		rs := traces.ResourceSpans().At(i)
 		attrs := rs.Resource().Attributes()
 
diff --git a/connector/exceptionsconnector/connector_logs.go b/connector/exceptionsconnector/connector_logs.go
index e8de93475aee1..2b906f1337ba8 100644
--- a/connector/exceptionsconnector/connector_logs.go
+++ b/connector/exceptionsconnector/connector_logs.go
@@ -50,7 +50,7 @@ func (c *logsConnector) Capabilities() consumer.Capabilities {
 // It aggregates the trace data to generate logs.
 func (c *logsConnector) ConsumeTraces(ctx context.Context, traces ptrace.Traces) error {
 	ld := plog.NewLogs()
-	for i := 0; i < traces.ResourceSpans().Len(); i++ {
+	for i := range traces.ResourceSpans().Len() {
 		rspans := traces.ResourceSpans().At(i)
 		resourceAttr := rspans.Resource().Attributes()
 		serviceAttr, ok := resourceAttr.Get(conventions.AttributeServiceName)
@@ -59,14 +59,14 @@ func (c *logsConnector) ConsumeTraces(ctx context.Context, traces ptrace.Traces)
 	}
 	serviceName := serviceAttr.Str()
 	ilsSlice := rspans.ScopeSpans()
-	for j := 0; j < ilsSlice.Len(); j++ {
+	for j := range ilsSlice.Len() {
 		sl := c.newScopeLogs(ld)
 		ils := ilsSlice.At(j)
 		ils.Scope().CopyTo(sl.Scope())
 		spans := ils.Spans()
-		for k := 0; k < spans.Len(); k++ {
+		for k := range spans.Len() {
 			span := spans.At(k)
-			for l := 0; l < span.Events().Len(); l++ {
+			for l := range span.Events().Len() {
 				event := span.Events().At(l)
 				if event.Name() == eventNameExc {
 					c.attrToLogRecord(sl, serviceName, span, event, resourceAttr)
diff --git a/connector/exceptionsconnector/connector_metrics.go b/connector/exceptionsconnector/connector_metrics.go
index a9a62d39143ed..354eff853eaa4 100644
--- a/connector/exceptionsconnector/connector_metrics.go
+++ b/connector/exceptionsconnector/connector_metrics.go
@@ -73,7 +73,7 @@ func (c *metricsConnector) Capabilities() consumer.Capabilities {
 // ConsumeTraces implements the consumer.Traces interface.
 // It aggregates the trace data to generate metrics.
 func (c *metricsConnector) ConsumeTraces(ctx context.Context, traces ptrace.Traces) error {
-	for i := 0; i < traces.ResourceSpans().Len(); i++ {
+	for i := range traces.ResourceSpans().Len() {
 		rspans := traces.ResourceSpans().At(i)
 		resourceAttr := rspans.Resource().Attributes()
 		serviceAttr, ok := resourceAttr.Get(conventions.AttributeServiceName)
@@ -82,12 +82,12 @@
 	}
 	serviceName := serviceAttr.Str()
 	ilsSlice := rspans.ScopeSpans()
-	for j := 0; j < ilsSlice.Len(); j++ {
+	for j := range ilsSlice.Len() {
 		ils := ilsSlice.At(j)
 		spans := ils.Spans()
-		for k := 0; k < spans.Len(); k++ {
+		for k := range spans.Len() {
 			span := spans.At(k)
-			for l := 0; l < span.Events().Len(); l++ {
+			for l := range span.Events().Len() {
 				event := span.Events().At(l)
 				if event.Name() == eventNameExc {
 					eventAttrs := event.Attributes()
@@ -140,7 +140,7 @@ func (c *metricsConnector) collectExceptions(ilm pmetric.ScopeMetrics) error {
 		dp.SetStartTimestamp(c.startTimestamp)
 		dp.SetTimestamp(timestamp)
 		dp.SetIntValue(int64(exc.count))
-		for i := 0; i < exc.exemplars.Len(); i++ {
+		for i := range exc.exemplars.Len() {
 			exc.exemplars.At(i).SetTimestamp(timestamp)
 		}
 		dp.Exemplars().EnsureCapacity(exc.exemplars.Len())
diff --git a/connector/exceptionsconnector/connector_metrics_test.go b/connector/exceptionsconnector/connector_metrics_test.go
index a478abf01409a..220873fb1c451 100644
--- a/connector/exceptionsconnector/connector_metrics_test.go
+++ b/connector/exceptionsconnector/connector_metrics_test.go
@@ -115,7 +115,7 @@ func BenchmarkConnectorConsumeTraces(b *testing.B) {
 
 	// Test
 	ctx := metadata.NewIncomingContext(context.Background(), nil)
-	for n := 0; n < b.N; n++ {
+	for range b.N {
 		assert.NoError(b, conn.ConsumeTraces(ctx, traces))
 	}
 }
@@ -189,7 +189,7 @@ func verifyConsumeMetricsInput(tb testing.TB, input pmetric.Metrics, numCumulati
 	assert.True(tb, m.At(0).Sum().IsMonotonic())
 	callsDps := m.At(0).Sum().DataPoints()
 	require.Equal(tb, 3, callsDps.Len())
-	for dpi := 0; dpi < 3; dpi++ {
+	for dpi := range 3 {
 		dp := callsDps.At(dpi)
 		assert.Equal(tb, int64(numCumulativeConsumptions), dp.IntValue(), "There should only be one metric per Service/kind combination")
 		assert.NotZero(tb, dp.StartTimestamp(), "StartTimestamp should be set")
diff --git a/connector/failoverconnector/internal/state/pipeline_selector.go b/connector/failoverconnector/internal/state/pipeline_selector.go
index 08bcedf9bb5f9..efcd68d9b790e 100644
--- a/connector/failoverconnector/internal/state/pipeline_selector.go
+++ b/connector/failoverconnector/internal/state/pipeline_selector.go
@@ -91,7 +91,7 @@ func (p *PipelineSelector) retryHighPriorityPipelines(ctx context.Context, retry
 
 	defer ticker.Stop()
 
-	for i := 0; i < len(p.pipelineRetries); i++ {
+	for i := range len(p.pipelineRetries) {
 		if p.exceededMaxRetries(i) {
 			continue
 		}
@@ -109,7 +109,7 @@ func (p *PipelineSelector) retryHighPriorityPipelines(ctx context.Context, retry
 
 // checkContinueRetry checks if retry should be suspended if all higher priority levels have exceeded their max retries
 func (p *PipelineSelector) checkContinueRetry(index int) bool {
-	for i := 0; i < index; i++ {
+	for i := range index {
 		if p.constants.MaxRetries == 0 || p.loadRetryCount(i) < p.constants.MaxRetries {
 			return true
 		}
@@ -169,7 +169,7 @@ func (p *PipelineSelector) reportStable(idx int) {
 
 func NewPipelineSelector(lenPriority int, consts PSConstants) *PipelineSelector {
 	chans := make([]chan bool, lenPriority)
-	for i := 0; i < lenPriority; i++ {
+	for i := range lenPriority {
 		chans[i] = make(chan bool)
 	}
 
diff --git a/connector/grafanacloudconnector/connector.go b/connector/grafanacloudconnector/connector.go
index 52df831ca40aa..6e93b11f55f8b 100644
--- a/connector/grafanacloudconnector/connector.go
+++ b/connector/grafanacloudconnector/connector.go
@@ -70,7 +70,7 @@ func (c *connectorImp) Capabilities() consumer.Capabilities {
 
 // ConsumeTraces implements connector.Traces.
 func (c *connectorImp) ConsumeTraces(_ context.Context, td ptrace.Traces) error {
-	for i := 0; i < td.ResourceSpans().Len(); i++ {
+	for i := range td.ResourceSpans().Len() {
 		resourceSpan := td.ResourceSpans().At(i)
 		attrs := resourceSpan.Resource().Attributes()
 		mapping := attrs.AsRaw()
diff --git a/connector/grafanacloudconnector/host_metrics_test.go b/connector/grafanacloudconnector/host_metrics_test.go
index ac790f1c7045f..96bdab60300dd 100644
--- a/connector/grafanacloudconnector/host_metrics_test.go
+++ b/connector/grafanacloudconnector/host_metrics_test.go
@@ -52,7 +52,7 @@ func TestHostMetrics(t *testing.T) {
 			rm := metrics.ResourceMetrics()
 			metric := rm.At(0).ScopeMetrics().At(0).Metrics().At(0)
 			assert.Equal(t, hostInfoMetric, metric.Name())
-			for i := 0; i < count; i++ {
+			for i := range count {
 				dp := metric.Gauge().DataPoints().At(i)
 				val, ok := dp.Attributes().Get(hostIdentifierAttr)
 				assert.Assert(t, ok)
diff --git a/connector/otlpjsonconnector/connector_test.go b/connector/otlpjsonconnector/connector_test.go
index cd433e75ccb3e..d0252c39c81c3 100644
--- a/connector/otlpjsonconnector/connector_test.go
+++ b/connector/otlpjsonconnector/connector_test.go
@@ -218,7 +218,7 @@ func BenchmarkConsumeLogs(b *testing.B) {
 	testTraces, _ := golden.ReadLogs(filepath.Join("testdata", "logsToTraces", inputTraces))
 	testMetrics, _ := golden.ReadLogs(filepath.Join("testdata", "logsToMetrics", inputMetrics))
 
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		assert.NoError(b, logscon.ConsumeLogs(context.Background(), testLogs))
 		assert.NoError(b, traceconn.ConsumeLogs(context.Background(), testTraces))
 		assert.NoError(b, metricconn.ConsumeLogs(context.Background(), testMetrics))
diff --git a/connector/otlpjsonconnector/logs.go b/connector/otlpjsonconnector/logs.go
index 4be368b89688b..d345023e10b50 100644
--- a/connector/otlpjsonconnector/logs.go
+++ b/connector/otlpjsonconnector/logs.go
@@ -43,11 +43,11 @@ func (c *connectorLogs) Capabilities() consumer.Capabilities {
 func (c *connectorLogs) ConsumeLogs(ctx context.Context, pl plog.Logs) error {
 	// loop through the levels of logs
 	logsUnmarshaler := &plog.JSONUnmarshaler{}
-	for i := 0; i < pl.ResourceLogs().Len(); i++ {
+	for i := range pl.ResourceLogs().Len() {
 		li := pl.ResourceLogs().At(i)
-		for j := 0; j < li.ScopeLogs().Len(); j++ {
+		for j := range li.ScopeLogs().Len() {
 			logRecord := li.ScopeLogs().At(j)
-			for k := 0; k < logRecord.LogRecords().Len(); k++ {
+			for k := range logRecord.LogRecords().Len() {
 				lRecord := logRecord.LogRecords().At(k)
 				token := lRecord.Body()
 
diff --git a/connector/otlpjsonconnector/metrics.go b/connector/otlpjsonconnector/metrics.go
index 603a5c9b54543..43eb111fd4dbd 100644
--- a/connector/otlpjsonconnector/metrics.go
+++ b/connector/otlpjsonconnector/metrics.go
@@ -44,11 +44,11 @@ func (c *connectorMetrics) Capabilities() consumer.Capabilities {
 func (c *connectorMetrics) ConsumeLogs(ctx context.Context, pl plog.Logs) error {
 	// loop through the levels of logs
 	metricsUnmarshaler := &pmetric.JSONUnmarshaler{}
-	for i := 0; i < pl.ResourceLogs().Len(); i++ {
+	for i := range pl.ResourceLogs().Len() {
 		li := pl.ResourceLogs().At(i)
-		for j := 0; j < li.ScopeLogs().Len(); j++ {
+		for j := range li.ScopeLogs().Len() {
 			logRecord := li.ScopeLogs().At(j)
-			for k := 0; k < logRecord.LogRecords().Len(); k++ {
+			for k := range logRecord.LogRecords().Len() {
 				lRecord := logRecord.LogRecords().At(k)
 				token := lRecord.Body()
 
diff --git a/connector/otlpjsonconnector/traces.go b/connector/otlpjsonconnector/traces.go
index 2b1e4b1f7bc9f..e40a6e4724a9a 100644
--- a/connector/otlpjsonconnector/traces.go
+++ b/connector/otlpjsonconnector/traces.go
@@ -44,11 +44,11 @@ func (c *connectorTraces) Capabilities() consumer.Capabilities {
 func (c *connectorTraces) ConsumeLogs(ctx context.Context, pl plog.Logs) error {
 	// loop through the levels of logs
 	tracesUnmarshaler := &ptrace.JSONUnmarshaler{}
-	for i := 0; i < pl.ResourceLogs().Len(); i++ {
+	for i := range pl.ResourceLogs().Len() {
 		li := pl.ResourceLogs().At(i)
-		for j := 0; j < li.ScopeLogs().Len(); j++ {
+		for j := range li.ScopeLogs().Len() {
 			logRecord := li.ScopeLogs().At(j)
-			for k := 0; k < logRecord.LogRecords().Len(); k++ {
+			for k := range logRecord.LogRecords().Len() {
 				lRecord := logRecord.LogRecords().At(k)
 				token := lRecord.Body()
 
diff --git a/connector/servicegraphconnector/connector.go b/connector/servicegraphconnector/connector.go
index 3cae81614a709..fc795cebcc658 100644
--- a/connector/servicegraphconnector/connector.go
+++ b/connector/servicegraphconnector/connector.go
@@ -224,7 +224,7 @@ func (p *serviceGraphConnector) aggregateMetrics(ctx context.Context, td ptrace.
 	)
 
 	rss := td.ResourceSpans()
-	for i := 0; i < rss.Len(); i++ {
+	for i := range rss.Len() {
 		rSpans := rss.At(i)
 		rAttributes := rSpans.Resource().Attributes()
 
@@ -236,9 +236,9 @@ func (p *serviceGraphConnector) aggregateMetrics(ctx context.Context, td ptrace.
 		}
 
 		scopeSpans := rSpans.ScopeSpans()
-		for j := 0; j < scopeSpans.Len(); j++ {
+		for j := range scopeSpans.Len() {
 			spans := scopeSpans.At(j).Spans()
-			for k := 0; k < spans.Len(); k++ {
+			for k := range spans.Len() {
 				span := spans.At(k)
 				connectionType := store.Unknown
 
diff --git a/connector/servicegraphconnector/internal/store/store_test.go b/connector/servicegraphconnector/internal/store/store_test.go
index fb4bc77ef1aec..cc5869871fcdb 100644
--- a/connector/servicegraphconnector/internal/store/store_test.go
+++ b/connector/servicegraphconnector/internal/store/store_test.go
@@ -103,7 +103,7 @@ func TestStoreExpire(t *testing.T) {
 	const testSize = 100
 
 	keys := map[Key]struct{}{}
-	for i := 0; i < testSize; i++ {
+	for i := range testSize {
 		keys[NewKey(pcommon.TraceID([16]byte{byte(i)}), pcommon.SpanID([8]byte{1, 2, 3}))] = struct{}{}
 	}
 
diff --git a/connector/spanmetricsconnector/connector.go b/connector/spanmetricsconnector/connector.go
index 3a0678dd2839f..11b37307e6c87 100644
--- a/connector/spanmetricsconnector/connector.go
+++ b/connector/spanmetricsconnector/connector.go
@@ -369,7 +369,7 @@ func (p *connectorImp) resetState() {
 // dimensions the user has configured.
 func (p *connectorImp) aggregateMetrics(traces ptrace.Traces) {
 	startTimestamp := pcommon.NewTimestampFromTime(p.clock.Now())
-	for i := 0; i < traces.ResourceSpans().Len(); i++ {
+	for i := range traces.ResourceSpans().Len() {
 		rspans := traces.ResourceSpans().At(i)
 		resourceAttr := rspans.Resource().Attributes()
 		serviceAttr, ok := resourceAttr.Get(conventions.AttributeServiceName)
@@ -385,10 +385,10 @@ func (p *connectorImp) aggregateMetrics(traces ptrace.Traces) {
 		unitDivider := unitDivider(p.config.Histogram.Unit)
 		serviceName := serviceAttr.Str()
 		ilsSlice := rspans.ScopeSpans()
-		for j := 0; j < ilsSlice.Len(); j++ {
+		for j := range ilsSlice.Len() {
 			ils := ilsSlice.At(j)
 			spans := ils.Spans()
-			for k := 0; k < spans.Len(); k++ {
+			for k := range spans.Len() {
 				span := spans.At(k)
 				// Protect against end timestamps before start timestamps. Assume 0 duration.
 				duration := float64(0)
@@ -419,7 +419,7 @@ func (p *connectorImp) aggregateMetrics(traces ptrace.Traces) {
 
 				// aggregate events metrics
 				if p.events.Enabled {
-					for l := 0; l < span.Events().Len(); l++ {
+					for l := range span.Events().Len() {
 						event := span.Events().At(l)
 						eDimensions := p.dimensions
 						eDimensions = append(eDimensions, p.eDimensions...)
diff --git a/connector/spanmetricsconnector/connector_test.go b/connector/spanmetricsconnector/connector_test.go
index 21d1d83ca0eb2..f54f3ab705231 100644
--- a/connector/spanmetricsconnector/connector_test.go
+++ b/connector/spanmetricsconnector/connector_test.go
@@ -79,13 +79,13 @@ type span struct {
 
 // verifyDisabledHistogram expects that histograms are disabled.
 func verifyDisabledHistogram(tb testing.TB, input pmetric.Metrics) bool {
-	for i := 0; i < input.ResourceMetrics().Len(); i++ {
+	for i := range input.ResourceMetrics().Len() {
 		rm := input.ResourceMetrics().At(i)
 		ism := rm.ScopeMetrics()
 		// Checking all metrics, naming notice: ismC/mC - C here is for Counter.
-		for ismC := 0; ismC < ism.Len(); ismC++ {
+		for ismC := range ism.Len() {
 			m := ism.At(ismC).Metrics()
-			for mC := 0; mC < m.Len(); mC++ {
+			for mC := range m.Len() {
 				metric := m.At(mC)
 				assert.NotEqual(tb, pmetric.MetricTypeExponentialHistogram, metric.Type())
 				assert.NotEqual(tb, pmetric.MetricTypeHistogram, metric.Type())
@@ -96,22 +96,22 @@
 }
 
 func verifyExemplarsExist(tb testing.TB, input pmetric.Metrics) bool {
-	for i := 0; i < input.ResourceMetrics().Len(); i++ {
+	for i := range input.ResourceMetrics().Len() {
 		rm := input.ResourceMetrics().At(i)
 		ism := rm.ScopeMetrics()
 		// Checking all metrics, naming notice: ismC/mC - C here is for Counter.
-		for ismC := 0; ismC < ism.Len(); ismC++ {
+		for ismC := range ism.Len() {
 			m := ism.At(ismC).Metrics()
-			for mC := 0; mC < m.Len(); mC++ {
+			for mC := range m.Len() {
 				metric := m.At(mC)
 				if metric.Type() != pmetric.MetricTypeHistogram {
 					continue
 				}
 				dps := metric.Histogram().DataPoints()
-				for dp := 0; dp < dps.Len(); dp++ {
+				for dp := range dps.Len() {
 					d := dps.At(dp)
 					assert.Positive(tb, d.Exemplars().Len())
 				}
@@ -156,7 +156,7 @@ func verifyConsumeMetricsInput(tb testing.TB, input pmetric.Metrics, expectedTem
 
 	require.Equal(tb, 2, input.ResourceMetrics().Len())
 
-	for i := 0; i < input.ResourceMetrics().Len(); i++ {
+	for i := range input.ResourceMetrics().Len() {
 		rm := input.ResourceMetrics().At(i)
 
 		var numDataPoints int
@@ -185,7 +185,7 @@ func verifyConsumeMetricsInput(tb testing.TB, input pmetric.Metrics, expectedTem
 	seenMetricIDs := make(map[metricID]bool)
 	callsDps := metric.Sum().DataPoints()
 	require.Equal(tb, numDataPoints, callsDps.Len())
-	for dpi := 0; dpi < numDataPoints; dpi++ {
+	for dpi := range numDataPoints {
 		dp := callsDps.At(dpi)
 		assert.Equal(tb,
 			int64(numCumulativeConsumptions),
@@ -218,7 +218,7 @@ func verifyConsumeMetricsInput(tb testing.TB, input pmetric.Metrics, expectedTem
 func verifyExplicitHistogramDataPoints(tb testing.TB, dps pmetric.HistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) {
 	seenMetricIDs := make(map[metricID]bool)
 	require.Equal(tb, numDataPoints, dps.Len())
-	for dpi := 0; dpi < numDataPoints; dpi++ {
+	for dpi := range numDataPoints {
 		dp := dps.At(dpi)
 		assert.Equal(
 			tb,
@@ -235,7 +235,7 @@ func verifyExplicitHistogramDataPoints(tb testing.TB, dps pmetric.HistogramDataP
 
 	// Find the bucket index where the 11ms duration should belong in.
 	var foundDurationIndex int
-	for foundDurationIndex = 0; foundDurationIndex < dp.ExplicitBounds().Len(); foundDurationIndex++ {
+	for foundDurationIndex = range dp.ExplicitBounds().Len() {
 		if dp.ExplicitBounds().At(foundDurationIndex) > sampleDuration {
 			break
 		}
@@ -243,7 +243,7 @@ func verifyExplicitHistogramDataPoints(tb testing.TB, dps pmetric.HistogramDataP
 
 	// Then verify that all histogram buckets are empty except for the bucket with the 11ms duration.
 	var wantBucketCount uint64
-	for bi := 0; bi < dp.BucketCounts().Len(); bi++ {
+	for bi := range dp.BucketCounts().Len() {
 		wantBucketCount = 0
 		if bi == foundDurationIndex {
 			wantBucketCount = uint64(numCumulativeConsumptions)
@@ -257,7 +257,7 @@ func verifyExplicitHistogramDataPoints(tb testing.TB, dps pmetric.HistogramDataP
 func verifyExponentialHistogramDataPoints(tb testing.TB, dps pmetric.ExponentialHistogramDataPointSlice, numDataPoints, numCumulativeConsumptions int) {
 	seenMetricIDs := make(map[metricID]bool)
 	require.Equal(tb, numDataPoints, dps.Len())
-	for dpi := 0; dpi < numDataPoints; dpi++ {
+	for dpi := range numDataPoints {
 		dp := dps.At(dpi)
 		assert.Equal(
 			tb,
@@ -655,7 +655,7 @@ func TestConcurrentShutdown(t *testing.T) {
 	var wg sync.WaitGroup
 	const concurrency = 1000
 	wg.Add(concurrency)
-	for i := 0; i < concurrency; i++ {
+	for range concurrency {
 		go func() {
 			err := p.Shutdown(ctx)
 			assert.NoError(t, err)
@@ -958,11 +958,11 @@ func TestResourceMetricsCache(t *testing.T) {
 	assert.Equal(t, 2, p.resourceMetrics.Len())
 
 	// consume more batches for new resources. Max size is exceeded causing old resource entries to be discarded
-	for i := 0; i < resourceMetricsCacheSize; i++ {
+	for i := range resourceMetricsCacheSize {
 		traces := buildSampleTrace()
 
 		// add resource attributes to simulate additional resources providing data
-		for j := 0; j < traces.ResourceSpans().Len(); j++ {
+		for j := range traces.ResourceSpans().Len() {
 			traces.ResourceSpans().At(j).Resource().Attributes().PutStr("dummy", fmt.Sprintf("%d", i))
 		}
 
@@ -1020,11 +1020,11 @@ func TestResourceMetricsKeyAttributes(t *testing.T) {
 	assert.Equal(t, 2, p.resourceMetrics.Len())
 
 	// consume more batches for new resources. Max size is exceeded causing old resource entries to be discarded
-	for i := 0; i < resourceMetricsCacheSize; i++ {
+	for i := range resourceMetricsCacheSize {
 		traces := buildSampleTrace()
 
 		// add resource attributes to simulate additional resources providing data
-		for j := 0; j < traces.ResourceSpans().Len(); j++ {
+		for j := range traces.ResourceSpans().Len() {
 			traces.ResourceSpans().At(j).Resource().Attributes().PutStr("not included in resource key attributes", fmt.Sprintf("%d", i))
 		}
 
@@ -1045,7 +1045,7 @@ func BenchmarkConnectorConsumeTraces(b *testing.B) {
 
 	// Test
 	ctx := metadata.NewIncomingContext(context.Background(), nil)
-	for n := 0; n < b.N; n++ {
+	for range b.N {
 		assert.NoError(b, conn.ConsumeTraces(ctx, traces))
 	}
 }
@@ -1085,13 +1085,13 @@ func TestExcludeDimensionsConsumeTraces(t *testing.T) {
 	require.NoError(t, err)
 	metrics := p.buildMetrics()
 
-	for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
+	for i := range metrics.ResourceMetrics().Len() {
 		rm := metrics.ResourceMetrics().At(i)
 		ism := rm.ScopeMetrics()
 		// Checking all metrics, naming notice: ilmC/mC - C here is for Counter.
-		for ilmC := 0; ilmC < ism.Len(); ilmC++ {
+		for ilmC := range ism.Len() {
 			m := ism.At(ilmC).Metrics()
-			for mC := 0; mC < m.Len(); mC++ {
+			for mC := range m.Len() {
 				metric := m.At(mC)
 				// We check only sum and histogram metrics here, because for now only they are present in this module.
@@ -1099,7 +1099,7 @@ func TestExcludeDimensionsConsumeTraces(t *testing.T) { case pmetric.MetricTypeExponentialHistogram, pmetric.MetricTypeHistogram: { dp := metric.Histogram().DataPoints() - for dpi := 0; dpi < dp.Len(); dpi++ { + for dpi := range dp.Len() { for attributeKey := range dp.At(dpi).Attributes().AsRaw() { assert.NotContains(t, excludeDimensions, attributeKey) } @@ -1108,7 +1108,7 @@ func TestExcludeDimensionsConsumeTraces(t *testing.T) { case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge, pmetric.MetricTypeSum, pmetric.MetricTypeSummary: { dp := metric.Sum().DataPoints() - for dpi := 0; dpi < dp.Len(); dpi++ { + for dpi := range dp.Len() { for attributeKey := range dp.At(dpi).Attributes().AsRaw() { assert.NotContains(t, excludeDimensions, attributeKey) } @@ -1514,23 +1514,23 @@ func TestSpanMetrics_Events(t *testing.T) { err = c.ConsumeTraces(context.Background(), buildSampleTrace()) require.NoError(t, err) metrics := c.buildMetrics() - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) ism := rm.ScopeMetrics() - for ilmC := 0; ilmC < ism.Len(); ilmC++ { + for ilmC := range ism.Len() { m := ism.At(ilmC).Metrics() if !tt.shouldEventsMetricExist { assert.Equal(t, 2, m.Len()) continue } assert.Equal(t, 3, m.Len()) - for mC := 0; mC < m.Len(); mC++ { + for mC := range m.Len() { metric := m.At(mC) if metric.Name() != "events" { continue } assert.Equal(t, pmetric.MetricTypeSum, metric.Type()) - for idp := 0; idp < metric.Sum().DataPoints().Len(); idp++ { + for idp := range metric.Sum().DataPoints().Len() { attrs := metric.Sum().DataPoints().At(idp).Attributes() assert.Contains(t, attrs.AsRaw(), exceptionTypeAttrName) } @@ -1624,19 +1624,19 @@ func TestExemplarsAreDiscardedAfterFlushing(t *testing.T) { } func assertDataPointsHaveExactlyOneExemplarForTrace(t *testing.T, metrics pmetric.Metrics, traceID pcommon.TraceID) { - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) ism := rm.ScopeMetrics() // Checking all metrics, naming notice: ilmC/mC - C here is for Counter. 
- for ilmC := 0; ilmC < ism.Len(); ilmC++ { + for ilmC := range ism.Len() { m := ism.At(ilmC).Metrics() - for mC := 0; mC < m.Len(); mC++ { + for mC := range m.Len() { metric := m.At(mC) switch metric.Type() { case pmetric.MetricTypeSum: dps := metric.Sum().DataPoints() assert.Positive(t, dps.Len()) - for dpi := 0; dpi < dps.Len(); dpi++ { + for dpi := range dps.Len() { dp := dps.At(dpi) assert.Equal(t, 1, dp.Exemplars().Len()) assert.Equal(t, dp.Exemplars().At(0).TraceID(), traceID) @@ -1644,7 +1644,7 @@ func assertDataPointsHaveExactlyOneExemplarForTrace(t *testing.T, metrics pmetri case pmetric.MetricTypeHistogram: dps := metric.Histogram().DataPoints() assert.Positive(t, dps.Len()) - for dpi := 0; dpi < dps.Len(); dpi++ { + for dpi := range dps.Len() { dp := dps.At(dpi) assert.Equal(t, 1, dp.Exemplars().Len()) assert.Equal(t, dp.Exemplars().At(0).TraceID(), traceID) @@ -1652,7 +1652,7 @@ func assertDataPointsHaveExactlyOneExemplarForTrace(t *testing.T, metrics pmetri case pmetric.MetricTypeExponentialHistogram: dps := metric.ExponentialHistogram().DataPoints() assert.Positive(t, dps.Len()) - for dpi := 0; dpi < dps.Len(); dpi++ { + for dpi := range dps.Len() { dp := dps.At(dpi) assert.Equal(t, 1, dp.Exemplars().Len()) assert.Equal(t, dp.Exemplars().At(0).TraceID(), traceID) @@ -1740,7 +1740,7 @@ func TestTimestampsForUninterruptedStream(t *testing.T) { func verifyAndCollectCommonTimestamps(t *testing.T, m pmetric.Metrics) (start pcommon.Timestamp, timestamp pcommon.Timestamp) { // Go through all data points and collect the start timestamp and timestamp. They should be the same value for each data point - for i := 0; i < m.ResourceMetrics().Len(); i++ { + for i := range m.ResourceMetrics().Len() { rm := m.ResourceMetrics().At(i) serviceName, _ := rm.Resource().Attributes().Get("service.name") @@ -1749,16 +1749,16 @@ func verifyAndCollectCommonTimestamps(t *testing.T, m pmetric.Metrics) (start pc } ism := rm.ScopeMetrics() - for ilmC := 0; ilmC < ism.Len(); ilmC++ { + for ilmC := range ism.Len() { m := ism.At(ilmC).Metrics() - for mC := 0; mC < m.Len(); mC++ { + for mC := range m.Len() { metric := m.At(mC) switch metric.Type() { case pmetric.MetricTypeSum: { dps := metric.Sum().DataPoints() - for dpi := 0; dpi < dps.Len(); dpi++ { + for dpi := range dps.Len() { if int64(start) == 0 { start = dps.At(dpi).StartTimestamp() timestamp = dps.At(dpi).Timestamp() @@ -1770,7 +1770,7 @@ func verifyAndCollectCommonTimestamps(t *testing.T, m pmetric.Metrics) (start pc case pmetric.MetricTypeHistogram: { dps := metric.Histogram().DataPoints() - for dpi := 0; dpi < dps.Len(); dpi++ { + for dpi := range dps.Len() { if int64(start) == 0 { start = dps.At(dpi).StartTimestamp() timestamp = dps.At(dpi).Timestamp() diff --git a/connector/spanmetricsconnector/internal/metrics/metrics.go b/connector/spanmetricsconnector/internal/metrics/metrics.go index 50591aec6980b..9e28e3a9373e1 100644 --- a/connector/spanmetricsconnector/internal/metrics/metrics.go +++ b/connector/spanmetricsconnector/internal/metrics/metrics.go @@ -109,7 +109,7 @@ func (m *explicitHistogramMetrics) BuildMetrics( dp.BucketCounts().FromRaw(h.bucketCounts) dp.SetCount(h.count) dp.SetSum(h.sum) - for i := 0; i < h.exemplars.Len(); i++ { + for i := range h.exemplars.Len() { h.exemplars.At(i).SetTimestamp(timestamp) } h.exemplars.CopyTo(dp.Exemplars()) @@ -158,7 +158,7 @@ func (m *exponentialHistogramMetrics) BuildMetrics( dp.SetStartTimestamp(startTimestamp(k)) dp.SetTimestamp(timestamp) expoHistToExponentialDataPoint(m.histogram, dp) - for i 
:= 0; i < m.exemplars.Len(); i++ { + for i := range m.exemplars.Len() { m.exemplars.At(i).SetTimestamp(timestamp) } m.exemplars.CopyTo(dp.Exemplars()) @@ -191,7 +191,7 @@ func expoHistToExponentialDataPoint(agg *structure.Histogram[float64], dp pmetri out.SetOffset(in.Offset()) out.BucketCounts().EnsureCapacity(int(in.Len())) - for i := uint32(0); i < in.Len(); i++ { + for i := range in.Len() { out.BucketCounts().Append(in.At(i)) } } @@ -298,7 +298,7 @@ func (m *SumMetrics) BuildMetrics( dp.SetStartTimestamp(startTimestamp(k)) dp.SetTimestamp(timestamp) dp.SetIntValue(int64(s.count)) - for i := 0; i < s.exemplars.Len(); i++ { + for i := range s.exemplars.Len() { s.exemplars.At(i).SetTimestamp(timestamp) } s.exemplars.CopyTo(dp.Exemplars()) diff --git a/examples/demo/client/main.go b/examples/demo/client/main.go index 1ca3639b2a3f9..74f186e0ec378 100644 --- a/examples/demo/client/main.go +++ b/examples/demo/client/main.go @@ -157,7 +157,7 @@ func main() { span.End() latencyMs := float64(time.Since(startTime)) / 1e6 nr := rand.IntN(7) - for i := 0; i < nr; i++ { + for i := range nr { randLineLength := rand.Int64N(999) lineCounts.Add(ctx, 1, metric.WithAttributes(commonLabels...)) lineLengths.Record(ctx, randLineLength, metric.WithAttributes(commonLabels...)) diff --git a/exporter/alertmanagerexporter/alertmanager_exporter.go b/exporter/alertmanagerexporter/alertmanager_exporter.go index 58a8ba393c732..283ff5d82b25d 100644 --- a/exporter/alertmanagerexporter/alertmanager_exporter.go +++ b/exporter/alertmanagerexporter/alertmanager_exporter.go @@ -44,7 +44,7 @@ func (s *alertmanagerExporter) convertEventSliceToArray(eventSlice ptrace.SpanEv if eventSlice.Len() > 0 { events := make([]*alertmanagerEvent, eventSlice.Len()) - for i := 0; i < eventSlice.Len(); i++ { + for i := range eventSlice.Len() { var severity string severityAttrValue, ok := eventSlice.At(i).Attributes().Get(s.severityAttribute) if ok { @@ -74,7 +74,7 @@ func (s *alertmanagerExporter) extractEvents(td ptrace.Traces) []*alertmanagerEv return nil } - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { resource := rss.At(i).Resource() ilss := rss.At(i).ScopeSpans() @@ -82,9 +82,9 @@ func (s *alertmanagerExporter) extractEvents(td ptrace.Traces) []*alertmanagerEv return nil } - for j := 0; j < ilss.Len(); j++ { + for j := range ilss.Len() { spans := ilss.At(j).Spans() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { traceID := spans.At(k).TraceID() spanID := spans.At(k).SpanID() events = append(events, s.convertEventSliceToArray(spans.At(k).Events(), traceID, spanID)...) 
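[Another aside, sketch only and not part of the patch: range over an integer accepts any integer type, and the loop variable adopts the type of the range operand. That is what lets the expoHistToExponentialDataPoint hunk in spanmetricsconnector/internal/metrics/metrics.go above replace "for i := uint32(0); i < in.Len(); i++" with "for i := range in.Len()" while still passing i to in.At(i), whose index is uint32 judging by the original loop's counter type.]

package main

import "fmt"

func main() {
	var n uint32 = 4
	for i := range n {
		// i is uint32 here, matching n's type, so no conversion is
		// needed when indexing uint32-based APIs.
		fmt.Printf("i = %d (%T)\n", i, i)
	}
}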
diff --git a/exporter/alertmanagerexporter/alertmanager_exporter_test.go b/exporter/alertmanagerexporter/alertmanager_exporter_test.go index f80516145ae22..46af240360863 100644 --- a/exporter/alertmanagerexporter/alertmanager_exporter_test.go +++ b/exporter/alertmanagerexporter/alertmanager_exporter_test.go @@ -85,7 +85,7 @@ func TestAlertManagerExporterExtractEvents(t *testing.T) { traces, span := createTracesAndSpan() // add events - for i := 0; i < tt.events; i++ { + for i := range tt.events { event := span.Events().AppendEmpty() // add event attributes startTime := pcommon.Timestamp(time.Now().UnixNano()) diff --git a/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go b/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go index 21a00312622b1..03f913dca8ba7 100644 --- a/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go +++ b/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go @@ -24,7 +24,7 @@ func createSimpleLogData(numberOfLogs int) plog.Logs { rl.ScopeLogs().AppendEmpty() // Add an empty ScopeLogs sl := rl.ScopeLogs().AppendEmpty() - for i := 0; i < numberOfLogs; i++ { + for i := range numberOfLogs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStr("mylog") diff --git a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice.go b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice.go index 5fc5058bfc19d..ba36baf34e9ce 100644 --- a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice.go +++ b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice.go @@ -35,16 +35,16 @@ const ( func logDataToLogService(ld plog.Logs) []*sls.Log { var slsLogs []*sls.Log rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) sl := rl.ScopeLogs() resource := rl.Resource() resourceContents := resourceToLogContents(resource) - for j := 0; j < sl.Len(); j++ { + for j := range sl.Len() { ils := sl.At(j) instrumentationLibraryContents := instrumentationScopeToLogContents(ils.Scope()) logs := ils.LogRecords() - for j := 0; j < logs.Len(); j++ { + for j := range logs.Len() { slsLog := mapLogRecordToLogService(logs.At(j), resourceContents, instrumentationLibraryContents) if slsLog != nil { slsLogs = append(slsLogs, slsLog) diff --git a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go index 1247c6e399fa9..a48edb5044d27 100644 --- a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go @@ -36,7 +36,7 @@ func createLogData(numberOfLogs int) plog.Logs { sl.Scope().SetName("collector") sl.Scope().SetVersion("v0.1.0") - for i := 0; i < numberOfLogs; i++ { + for i := range numberOfLogs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() switch i { @@ -96,7 +96,7 @@ func TestLogsDataToLogService(t *testing.T) { t.Errorf("Failed load log key value pairs from %q: %v", resultLogFile, err) return } - for j := 0; j < validLogCount; j++ { + for j := range validLogCount { sort.Sort(logKeyValuePairs(gotLogPairs[j])) sort.Sort(logKeyValuePairs(wantLogs[j])) assert.Equal(t, wantLogs[j], gotLogPairs[j]) diff --git a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice.go b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice.go index 
d320eea7305b5..07da1bd279363 100644 --- a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice.go +++ b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice.go @@ -89,7 +89,7 @@ func (kv *KeyValues) labelToStringBuilder(sb *strings.Builder) { func formatMetricName(name string) string { var newName []byte - for i := 0; i < len(name); i++ { + for i := range len(name) { b := name[i] if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || @@ -148,7 +148,7 @@ func resourceToMetricLabels(labels *KeyValues, resource pcommon.Resource) { } func numberMetricsToLogs(name string, data pmetric.NumberDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := defaultLabels.Clone() @@ -179,7 +179,7 @@ func numberMetricsToLogs(name string, data pmetric.NumberDataPointSlice, default } func doubleHistogramMetricsToLogs(name string, data pmetric.HistogramDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := defaultLabels.Clone() @@ -198,7 +198,7 @@ func doubleHistogramMetricsToLogs(name string, data pmetric.HistogramDataPointSl bounds := dataPoint.ExplicitBounds() boundsStr := make([]string, bounds.Len()+1) - for i := 0; i < bounds.Len(); i++ { + for i := range bounds.Len() { boundsStr[i] = strconv.FormatFloat(bounds.At(i), 'g', -1, 64) } boundsStr[len(boundsStr)-1] = infinityBoundValue @@ -208,7 +208,7 @@ func doubleHistogramMetricsToLogs(name string, data pmetric.HistogramDataPointSl bucketLabels := labels.Clone() bucketLabels.Append(bucketLabelKey, "") bucketLabels.Sort() - for i := 0; i < bucketCount; i++ { + for i := range bucketCount { bucket := dataPoint.BucketCounts().At(i) bucketLabels.Replace(bucketLabelKey, boundsStr[i]) @@ -226,7 +226,7 @@ func doubleHistogramMetricsToLogs(name string, data pmetric.HistogramDataPointSl } func doubleSummaryMetricsToLogs(name string, data pmetric.SummaryDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := defaultLabels.Clone() @@ -249,7 +249,7 @@ func doubleSummaryMetricsToLogs(name string, data pmetric.SummaryDataPointSlice, summaryLabels.Sort() values := dataPoint.QuantileValues() - for i := 0; i < values.Len(); i++ { + for i := range values.Len() { value := values.At(i) summaryLabels.Replace(summaryLabelKey, strconv.FormatFloat(value.Quantile(), 'g', -1, 64)) logs = append(logs, newMetricLogFromRaw(name, @@ -283,16 +283,16 @@ func metricsDataToLogServiceData( md pmetric.Metrics, ) (logs []*sls.Log) { resMetrics := md.ResourceMetrics() - for i := 0; i < resMetrics.Len(); i++ { + for i := range resMetrics.Len() { resMetricSlice := resMetrics.At(i) var defaultLabels KeyValues resourceToMetricLabels(&defaultLabels, resMetricSlice.Resource()) insMetricSlice := resMetricSlice.ScopeMetrics() - for j := 0; j < insMetricSlice.Len(); j++ { + for j := range insMetricSlice.Len() { insMetrics := insMetricSlice.At(j) // ignore insMetrics.Scope() metricSlice := insMetrics.Metrics() - for k := 0; k < metricSlice.Len(); k++ { + for k := range metricSlice.Len() { oneMetric := metricSlice.At(k) logs = append(logs, metricDataToLogServiceData(oneMetric, defaultLabels)...) 
} diff --git a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go index 67e2115173078..82136017ced5b 100644 --- a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go @@ -121,7 +121,7 @@ func TestMetricDataToLogService(t *testing.T) { return } assert.Equal(t, len(wantLogs), len(gotLogs)) - for j := 0; j < len(gotLogs); j++ { + for j := range gotLogs { sort.Sort(logKeyValuePairs(gotLogPairs[j])) sort.Sort(logKeyValuePairs(wantLogs[j])) assert.Equal(t, wantLogs[j], gotLogPairs[j]) diff --git a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice.go b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice.go index f30f62a99b95c..8d8d049a3fcd7 100644 --- a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice.go +++ b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice.go @@ -38,7 +38,7 @@ const ( func traceDataToLogServiceData(td ptrace.Traces) []*sls.Log { var slsLogs []*sls.Log resourceSpansSlice := td.ResourceSpans() - for i := 0; i < resourceSpansSlice.Len(); i++ { + for i := range resourceSpansSlice.Len() { logs := resourceSpansToLogServiceData(resourceSpansSlice.At(i)) slsLogs = append(slsLogs, logs...) } @@ -49,11 +49,11 @@ func resourceSpansToLogServiceData(resourceSpans ptrace.ResourceSpans) []*sls.Lo resourceContents := resourceToLogContents(resourceSpans.Resource()) scopeSpansSlice := resourceSpans.ScopeSpans() var slsLogs []*sls.Log - for i := 0; i < scopeSpansSlice.Len(); i++ { + for i := range scopeSpansSlice.Len() { insLibSpans := scopeSpansSlice.At(i) instrumentationLibraryContents := instrumentationScopeToLogContents(insLibSpans.Scope()) spans := insLibSpans.Spans() - for j := 0; j < spans.Len(); j++ { + for j := range spans.Len() { if slsLog := spanToLogServiceData(spans.At(j), resourceContents, instrumentationLibraryContents); slsLog != nil { slsLogs = append(slsLogs, slsLog) } @@ -178,7 +178,7 @@ func statusCodeToShortString(code ptrace.StatusCode) string { func eventsToString(events ptrace.SpanEventSlice) string { eventArray := make([]map[string]any, 0, events.Len()) - for i := 0; i < events.Len(); i++ { + for i := range events.Len() { spanEvent := events.At(i) event := map[string]any{} event[nameField] = spanEvent.Name() @@ -192,7 +192,7 @@ func eventsToString(events ptrace.SpanEventSlice) string { func spanLinksToString(spanLinkSlice ptrace.SpanLinkSlice) string { linkArray := make([]map[string]any, 0, spanLinkSlice.Len()) - for i := 0; i < spanLinkSlice.Len(); i++ { + for i := range spanLinkSlice.Len() { spanLink := spanLinkSlice.At(i) link := map[string]any{} link[spanIDField] = traceutil.SpanIDToHexOrEmptyString(spanLink.SpanID()) diff --git a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go index 3731480f8c932..5f6ef0a08707f 100644 --- a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go @@ -52,7 +52,7 @@ func TestTraceDataToLogService(t *testing.T) { t.Errorf("Failed load log key value pairs from %q: %v", resultLogFile, err) return } - for j := 0; j < len(gotLogs); j++ { + for j := range gotLogs { sort.Sort(logKeyValuePairs(gotLogPairs[j])) sort.Sort(logKeyValuePairs(wantLogs[j])) assert.Equal(t, wantLogs[j], gotLogPairs[j]) diff --git 
a/exporter/awscloudwatchlogsexporter/config_test.go b/exporter/awscloudwatchlogsexporter/config_test.go index f52818f158793..4e9694578ca47 100644 --- a/exporter/awscloudwatchlogsexporter/config_test.go +++ b/exporter/awscloudwatchlogsexporter/config_test.go @@ -157,7 +157,7 @@ func TestValidateTags(t *testing.T) { // Create a map with no items and then one with too many items for testing emptyMap := make(map[string]*string) bigMap := make(map[string]*string) - for i := 0; i < 51; i++ { + for i := range 51 { bigMap[strconv.Itoa(i)] = &basicValue } diff --git a/exporter/awscloudwatchlogsexporter/exporter.go b/exporter/awscloudwatchlogsexporter/exporter.go index 9aa14be1e3deb..4db6c2656653b 100644 --- a/exporter/awscloudwatchlogsexporter/exporter.go +++ b/exporter/awscloudwatchlogsexporter/exporter.go @@ -130,16 +130,16 @@ func pushLogsToCWLogs(logger *zap.Logger, ld plog.Logs, config *Config, pusher c var errs error rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) resourceAttrs := attrsValue(rl.Resource().Attributes()) sls := rl.ScopeLogs() - for j := 0; j < sls.Len(); j++ { + for j := range sls.Len() { sl := sls.At(j) scope := sl.Scope() logs := sl.LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { log := logs.At(k) event, err := logToCWLog(resourceAttrs, scope, log, config) if err != nil { diff --git a/exporter/awscloudwatchlogsexporter/exporter_test.go b/exporter/awscloudwatchlogsexporter/exporter_test.go index 160821e1a484b..72f2146b425c9 100644 --- a/exporter/awscloudwatchlogsexporter/exporter_test.go +++ b/exporter/awscloudwatchlogsexporter/exporter_test.go @@ -262,7 +262,7 @@ func BenchmarkLogToCWLog(b *testing.B) { resource := testResource() log := testLogRecord() scope := testScope() - for i := 0; i < b.N; i++ { + for range b.N { _, err := logToCWLog(attrsValue(resource.Attributes()), scope, log, &Config{}) if err != nil { b.Errorf("logToCWLog() failed %v", err) diff --git a/exporter/awsemfexporter/config_test.go b/exporter/awsemfexporter/config_test.go index bdcfa5f39e202..a1fd0a7736e1c 100644 --- a/exporter/awsemfexporter/config_test.go +++ b/exporter/awsemfexporter/config_test.go @@ -186,7 +186,7 @@ func TestValidateTags(t *testing.T) { // Create a map with no items and then one with too many items for testing emptyMap := make(map[string]*string) bigMap := make(map[string]*string) - for i := 0; i < 51; i++ { + for i := range 51 { bigMap[strconv.Itoa(i)] = &basicValue } diff --git a/exporter/awsemfexporter/datapoint.go b/exporter/awsemfexporter/datapoint.go index eb6ccc2b87b8a..3d7429e5ef98a 100644 --- a/exporter/awsemfexporter/datapoint.go +++ b/exporter/awsemfexporter/datapoint.go @@ -466,7 +466,7 @@ func (dps summaryDataPointSlice) CalculateDeltaDatapoints(i int, instrumentation datapoints = append(datapoints, dataPoint{name: fmt.Sprint(dps.metricName, summarySumSuffix), value: sum, labels: labels, timestampMs: timestampMs}) datapoints = append(datapoints, dataPoint{name: fmt.Sprint(dps.metricName, summaryCountSuffix), value: count, labels: labels, timestampMs: timestampMs}) - for i := 0; i < values.Len(); i++ { + for i := range values.Len() { cLabels := maps.Clone(labels) quantile := values.At(i) cLabels["quantile"] = strconv.FormatFloat(quantile.Quantile(), 'g', -1, 64) @@ -494,7 +494,7 @@ func (dps summaryDataPointSlice) IsStaleNaNInf(i int) (bool, pcommon.Map) { } values := metric.QuantileValues() - for i := 0; i < values.Len(); i++ { + for i := range values.Len() { quantile := values.At(i) 
if math.IsNaN(quantile.Value()) || math.IsNaN(quantile.Quantile()) || math.IsInf(quantile.Value(), 0) || math.IsInf(quantile.Quantile(), 0) { diff --git a/exporter/awsemfexporter/datapoint_test.go b/exporter/awsemfexporter/datapoint_test.go index 9de974c8156d6..e66c69b41a845 100644 --- a/exporter/awsemfexporter/datapoint_test.go +++ b/exporter/awsemfexporter/datapoint_test.go @@ -88,7 +88,7 @@ func generateTestSumMetric(name string, valueType metricValueType) pmetric.Metri otelMetrics := pmetric.NewMetrics() rs := otelMetrics.ResourceMetrics().AppendEmpty() metrics := rs.ScopeMetrics().AppendEmpty().Metrics() - for i := 0; i < 2; i++ { + for i := range 2 { metric := metrics.AppendEmpty() metric.SetName(name) metric.SetUnit("Count") @@ -309,7 +309,7 @@ func generateTestSummaryMetric(name string) pmetric.Metrics { rs := otelMetrics.ResourceMetrics().AppendEmpty() metrics := rs.ScopeMetrics().AppendEmpty().Metrics() - for i := 0; i < 2; i++ { + for i := range 2 { metric := metrics.AppendEmpty() metric.SetName(name) metric.SetUnit("Seconds") @@ -334,7 +334,7 @@ func generateTestSummaryMetricWithNaN(name string) pmetric.Metrics { rs := otelMetrics.ResourceMetrics().AppendEmpty() metrics := rs.ScopeMetrics().AppendEmpty().Metrics() - for i := 0; i < 2; i++ { + for i := range 2 { metric := metrics.AppendEmpty() metric.SetName(name) metric.SetUnit("Seconds") @@ -359,7 +359,7 @@ func generateTestSummaryMetricWithInf(name string) pmetric.Metrics { rs := otelMetrics.ResourceMetrics().AppendEmpty() metrics := rs.ScopeMetrics().AppendEmpty().Metrics() - for i := 0; i < 2; i++ { + for i := range 2 { metric := metrics.AppendEmpty() metric.SetName(name) metric.SetUnit("Seconds") @@ -385,7 +385,7 @@ func generateOtelTestMetrics(generatedOtelMetrics ...pmetric.Metrics) pmetric.Me finalMetrics := rs.ScopeMetrics().AppendEmpty().Metrics() for _, generatedOtelMetric := range generatedOtelMetrics { generatedMetrics := generatedOtelMetric.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() - for i := 0; i < generatedMetrics.Len(); i++ { + for i := range generatedMetrics.Len() { generatedMetric := generatedMetrics.At(i) finalMetric := finalMetrics.AppendEmpty() generatedMetric.CopyTo(finalMetric) @@ -2137,12 +2137,12 @@ func benchmarkGetAndCalculateDeltaDataPoints(b *testing.B, bucketLength int) { emfCalcs := setupEmfCalculators() defer require.NoError(b, shutdownEmfCalculators(emfCalcs)) b.ResetTimer() - for n := 0; n < b.N; n++ { - for i := 0; i < metrics.Len(); i++ { + for range b.N { + for i := range metrics.Len() { metadata := generateTestMetricMetadata("namespace", time.Now().UnixNano()/int64(time.Millisecond), "log-group", "log-stream", "cloudwatch-otel", metrics.At(i).Type(), 0) dps := getDataPoints(metrics.At(i), metadata, zap.NewNop()) - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dps.CalculateDeltaDatapoints(i, "", false, emfCalcs) } } diff --git a/exporter/awsemfexporter/emf_exporter.go b/exporter/awsemfexporter/emf_exporter.go index 34b9eaf76856c..2191f609fdeff 100644 --- a/exporter/awsemfexporter/emf_exporter.go +++ b/exporter/awsemfexporter/emf_exporter.go @@ -99,7 +99,7 @@ func newEmfExporter(config *Config, set exporter.Settings) (*emfExporter, error) func (emf *emfExporter) pushMetricsData(_ context.Context, md pmetric.Metrics) error { rms := md.ResourceMetrics() labels := map[string]string{} - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { rm := rms.At(i) am := rm.Resource().Attributes() if am.Len() > 0 { @@ -115,7 +115,7 @@ func (emf *emfExporter) 
pushMetricsData(_ context.Context, md pmetric.Metrics) e defaultLogStream := fmt.Sprintf("otel-stream-%s", emf.collectorID) outputDestination := emf.config.OutputDestination - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { err := emf.metricTranslator.translateOTelToGroupedMetric(rms.At(i), groupedMetrics, emf.config) if err != nil { return err diff --git a/exporter/awsemfexporter/grouped_metric.go b/exporter/awsemfexporter/grouped_metric.go index 8d4f8a57aa056..d6c3896487091 100644 --- a/exporter/awsemfexporter/grouped_metric.go +++ b/exporter/awsemfexporter/grouped_metric.go @@ -41,7 +41,7 @@ func addToGroupedMetric( return nil } - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { // Drop stale or NaN metric values if isStaleNanInf, attrs := dps.IsStaleNaNInf(i); isStaleNanInf { if config != nil && config.logger != nil { diff --git a/exporter/awsemfexporter/grouped_metric_test.go b/exporter/awsemfexporter/grouped_metric_test.go index 35b2ffff3f672..57d59a214bfc9 100644 --- a/exporter/awsemfexporter/grouped_metric_test.go +++ b/exporter/awsemfexporter/grouped_metric_test.go @@ -106,7 +106,7 @@ func TestAddToGroupedMetric(t *testing.T) { assert.Equal(t, 1, rms.Len()) assert.Equal(t, 1, ilms.Len()) - for i := 0; i < metrics.Len(); i++ { + for i := range metrics.Len() { err := addToGroupedMetric(metrics.At(i), groupedMetrics, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, metrics.At(i).Type(), 0), true, @@ -147,7 +147,7 @@ func TestAddToGroupedMetric(t *testing.T) { metrics := ilms.At(0).Metrics() assert.Equal(t, 9, metrics.Len()) - for i := 0; i < metrics.Len(); i++ { + for i := range metrics.Len() { err := addToGroupedMetric(metrics.At(i), groupedMetrics, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, metrics.At(i).Type(), 0), @@ -219,7 +219,7 @@ func TestAddToGroupedMetric(t *testing.T) { metrics := ilms.At(0).Metrics() // Verify if all metrics are generated, including NaN, Inf values require.Equal(t, 19, metrics.Len(), "mock metric creation failed") - for i := 0; i < metrics.Len(); i++ { + for i := range metrics.Len() { err := addToGroupedMetric(metrics.At(i), groupedMetrics, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, metrics.At(i).Type(), 0), @@ -338,7 +338,7 @@ func TestAddToGroupedMetric(t *testing.T) { obs, logs := observer.New(zap.WarnLevel) testCfg.logger = zap.New(obs) - for i := 0; i < metrics.Len(); i++ { + for i := range metrics.Len() { err := addToGroupedMetric(metrics.At(i), groupedMetrics, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, metrics.At(i).Type(), 0), @@ -421,7 +421,7 @@ func TestAddToGroupedMetric(t *testing.T) { metrics := ilms.At(0).Metrics() assert.Equal(t, 1, metrics.Len()) - for i := 0; i < metrics.Len(); i++ { + for i := range metrics.Len() { err := addToGroupedMetric(metrics.At(i), groupedMetrics, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, metrics.At(i).Type(), 0), @@ -498,9 +498,9 @@ func BenchmarkAddToGroupedMetric(b *testing.B) { numMetrics := metrics.Len() b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { groupedMetrics := make(map[any]*groupedMetric) - for i := 0; i < numMetrics; i++ { + for i := range numMetrics { metadata := generateTestMetricMetadata("namespace", int64(1596151098037), "log-group", "log-stream", "cloudwatch-otel", metrics.At(i).Type(), 0) 
err := addToGroupedMetric(metrics.At(i), groupedMetrics, metadata, true, nil, testCfg, emfCalcs) assert.NoError(b, err) diff --git a/exporter/awsemfexporter/metric_translator.go b/exporter/awsemfexporter/metric_translator.go index 09844e132c7ee..cd5f448366197 100644 --- a/exporter/awsemfexporter/metric_translator.go +++ b/exporter/awsemfexporter/metric_translator.go @@ -135,14 +135,14 @@ func (mt metricTranslator) translateOTelToGroupedMetric(rm pmetric.ResourceMetri if receiver, ok := rm.Resource().Attributes().Get(attributeReceiver); ok { metricReceiver = receiver.Str() } - for j := 0; j < ilms.Len(); j++ { + for j := range ilms.Len() { ilm := ilms.At(j) if ilm.Scope().Name() != "" { instrumentationScopeName = ilm.Scope().Name() } metrics := ilm.Metrics() - for k := 0; k < metrics.Len(); k++ { + for k := range metrics.Len() { metric := metrics.At(k) metadata := cWMetricMetadata{ groupedMetricMetadata: groupedMetricMetadata{ diff --git a/exporter/awsemfexporter/metric_translator_test.go b/exporter/awsemfexporter/metric_translator_test.go index beaedb4d2ed71..ce8bd218273fd 100644 --- a/exporter/awsemfexporter/metric_translator_test.go +++ b/exporter/awsemfexporter/metric_translator_test.go @@ -79,7 +79,7 @@ func createTestResourceMetrics() pmetric.ResourceMetrics { q2.SetQuantile(1) q2.SetValue(5) - for i := 0; i < 2; i++ { + for i := range 2 { m = sm.Metrics().AppendEmpty() m.SetName("spanCounter") m.SetDescription("Counting all the spans") @@ -134,7 +134,7 @@ func (d dimensionality) Less(i, j int) bool { dim1 := d[i] dim2 := d[j] - for k := 0; k < min(len(dim1), len(dim2)); k++ { + for k := range min(len(dim1), len(dim2)) { if dim1[k] != dim2[k] { return dim1[k] < dim2[k] } @@ -2006,7 +2006,7 @@ func BenchmarkTranslateOtToGroupedMetricWithInstrLibrary(b *testing.B) { defer require.NoError(b, translator.Shutdown()) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { groupedMetric := make(map[any]*groupedMetric) err := translator.translateOTelToGroupedMetric(rm, groupedMetric, config) assert.NoError(b, err) @@ -2029,7 +2029,7 @@ func BenchmarkTranslateOtToGroupedMetricWithoutConfigReplacePattern(b *testing.B defer require.NoError(b, translator.Shutdown()) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { groupedMetrics := make(map[any]*groupedMetric) err := translator.translateOTelToGroupedMetric(rm, groupedMetrics, config) assert.NoError(b, err) @@ -2052,7 +2052,7 @@ func BenchmarkTranslateOtToGroupedMetricWithConfigReplaceWithResource(b *testing defer require.NoError(b, translator.Shutdown()) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { groupedMetrics := make(map[any]*groupedMetric) err := translator.translateOTelToGroupedMetric(rm, groupedMetrics, config) assert.NoError(b, err) @@ -2075,7 +2075,7 @@ func BenchmarkTranslateOtToGroupedMetricWithConfigReplaceWithLabel(b *testing.B) defer require.NoError(b, translator.Shutdown()) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { groupedMetrics := make(map[any]*groupedMetric) err := translator.translateOTelToGroupedMetric(rm, groupedMetrics, config) assert.NoError(b, err) @@ -2093,7 +2093,7 @@ func BenchmarkTranslateOtToGroupedMetricWithoutInstrLibrary(b *testing.B) { defer require.NoError(b, translator.Shutdown()) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { groupedMetrics := make(map[any]*groupedMetric) err := translator.translateOTelToGroupedMetric(rm, groupedMetrics, config) assert.NoError(b, err) @@ -2130,7 +2130,7 @@ func BenchmarkTranslateGroupedMetricToCWMetric(b 
*testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { translateGroupedMetricToCWMetric(groupedMetric, config) } } @@ -2172,7 +2172,7 @@ func BenchmarkTranslateGroupedMetricToCWMetricWithFiltering(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { translateGroupedMetricToCWMetric(groupedMetric, config) } } @@ -2199,7 +2199,7 @@ func BenchmarkTranslateCWMetricToEMF(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { _, err := translateCWMetricToEMF(met, &Config{}) require.NoError(b, err) } diff --git a/exporter/awskinesisexporter/internal/batch/batch_test.go b/exporter/awskinesisexporter/internal/batch/batch_test.go index 6cf778754359b..a9b4216850331 100644 --- a/exporter/awskinesisexporter/internal/batch/batch_test.go +++ b/exporter/awskinesisexporter/internal/batch/batch_test.go @@ -15,7 +15,7 @@ func TestBatchingMessages(t *testing.T) { t.Parallel() b := batch.New() - for i := 0; i < 948; i++ { + for range 948 { assert.NoError(t, b.AddRecord([]byte("foobar"), "fixed-string"), "Must not error when adding elements into the batch") } @@ -41,7 +41,7 @@ func TestCustomBatchSizeConstraints(t *testing.T) { batch.WithMaxRecordsPerBatch(1), ) const records = 203 - for i := 0; i < records; i++ { + for range records { assert.NoError(t, b.AddRecord([]byte("foobar"), "fixed-string"), "Must not error when adding elements into the batch") } assert.Len(t, b.Chunk(), records, "Must have one batch per record added") @@ -49,13 +49,13 @@ func TestCustomBatchSizeConstraints(t *testing.T) { func BenchmarkChunkingRecords(b *testing.B) { bt := batch.New() - for i := 0; i < 948; i++ { + for range 948 { assert.NoError(b, bt.AddRecord([]byte("foobar"), "fixed-string"), "Must not error when adding elements into the batch") } b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { assert.Len(b, bt.Chunk(), 2, "Must have exactly two chunks") } } diff --git a/exporter/awskinesisexporter/internal/batch/encode_marshaler.go b/exporter/awskinesisexporter/internal/batch/encode_marshaler.go index 7e29d606cd31d..23d226755acbf 100644 --- a/exporter/awskinesisexporter/internal/batch/encode_marshaler.go +++ b/exporter/awskinesisexporter/internal/batch/encode_marshaler.go @@ -37,7 +37,7 @@ func (bm *batchMarshaller) Logs(ld plog.Logs) (*Batch, error) { export.ResourceLogs().AppendEmpty() var errs error - for i := 0; i < ld.ResourceLogs().Len(); i++ { + for i := range ld.ResourceLogs().Len() { line := ld.ResourceLogs().At(i) line.CopyTo(export.ResourceLogs().At(0)) @@ -69,7 +69,7 @@ func (bm *batchMarshaller) Traces(td ptrace.Traces) (*Batch, error) { export.ResourceSpans().AppendEmpty() var errs error - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { span := td.ResourceSpans().At(i) span.CopyTo(export.ResourceSpans().At(0)) @@ -101,7 +101,7 @@ func (bm *batchMarshaller) Metrics(md pmetric.Metrics) (*Batch, error) { export.ResourceMetrics().AppendEmpty() var errs error - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { datapoint := md.ResourceMetrics().At(i) datapoint.CopyTo(export.ResourceMetrics().At(0)) diff --git a/exporter/awskinesisexporter/internal/batch/encoder_test.go b/exporter/awskinesisexporter/internal/batch/encoder_test.go index 510157d1de407..cb1b33f56110d 100644 --- a/exporter/awskinesisexporter/internal/batch/encoder_test.go +++ b/exporter/awskinesisexporter/internal/batch/encoder_test.go @@ -13,7 +13,7 @@ import ( func 
NewTestTraces(spanCount int) ptrace.Traces { traces := ptrace.NewTraces() - for i := 0; i < spanCount; i++ { + for range spanCount { span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("foo") span.SetStartTimestamp(pcommon.Timestamp(10)) @@ -28,7 +28,7 @@ func NewTestTraces(spanCount int) ptrace.Traces { func NewTestMetrics(metricCount int) pmetric.Metrics { metrics := pmetric.NewMetrics() - for i := 0; i < metricCount; i++ { + for i := range metricCount { metric := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() metric.SetName("foo") metric.SetUnit("bar") @@ -41,7 +41,7 @@ func NewTestMetrics(metricCount int) pmetric.Metrics { func NewTestLogs(logCount int) plog.Logs { logs := plog.NewLogs() - for i := 0; i < logCount; i++ { + for range logCount { log := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() log.SetSeverityText("bar") } diff --git a/exporter/awskinesisexporter/internal/compress/compresser_test.go b/exporter/awskinesisexporter/internal/compress/compresser_test.go index 21651360adb06..5f8dcc880a542 100644 --- a/exporter/awskinesisexporter/internal/compress/compresser_test.go +++ b/exporter/awskinesisexporter/internal/compress/compresser_test.go @@ -123,13 +123,13 @@ func benchmarkCompressor(b *testing.B, format string, length int) { require.NotNil(b, compressor, "Must have a valid compressor") data := make([]byte, length) - for i := 0; i < length; i++ { + for i := range length { data[i] = byte(rand.Int32()) } b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { out, err := compressor(data) assert.NoError(b, err, "Must not error when processing data") assert.NotNil(b, out, "Must have a valid byte array after") @@ -178,12 +178,12 @@ func concurrentCompressFunc(t *testing.T) { // since it is where the chances of having race conditions are bigger dataLength := 131072 - for j := 0; j < numWorkers; j++ { + for range numWorkers { go func() { defer wg.Done() data := make([]byte, dataLength) - for i := 0; i < dataLength; i++ { + for i := range dataLength { data[i] = byte(rand.Int32()) } diff --git a/exporter/awskinesisexporter/internal/producer/batcher_bench_test.go b/exporter/awskinesisexporter/internal/producer/batcher_bench_test.go index dc5d7de644552..89f158ba45da0 100644 --- a/exporter/awskinesisexporter/internal/producer/batcher_bench_test.go +++ b/exporter/awskinesisexporter/internal/producer/batcher_bench_test.go @@ -23,14 +23,14 @@ func benchXEmptyMessages(b *testing.B, msgCount int) { require.NoError(b, err, "Must have a valid producer") bt := batch.New() - for i := 0; i < msgCount; i++ { + for range msgCount { assert.NoError(b, bt.AddRecord([]byte("foobar"), "fixed-key")) } b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { assert.NoError(b, producer.Put(context.Background(), bt)) } } diff --git a/exporter/awskinesisexporter/internal/producer/batcher_test.go b/exporter/awskinesisexporter/internal/producer/batcher_test.go index f9b61e0eee175..b40589cb49646 100644 --- a/exporter/awskinesisexporter/internal/producer/batcher_test.go +++ b/exporter/awskinesisexporter/internal/producer/batcher_test.go @@ -78,7 +78,7 @@ func TestBatchedExporter(t *testing.T) { } bt := batch.New() - for i := 0; i < 500; i++ { + for range 500 { assert.NoError(t, bt.AddRecord([]byte("foobar"), "fixed-key")) } diff --git a/exporter/awss3exporter/body_marshaler.go b/exporter/awss3exporter/body_marshaler.go index 
b526c3ea4e6ee..f27a53ede4b60 100644 --- a/exporter/awss3exporter/body_marshaler.go +++ b/exporter/awss3exporter/body_marshaler.go @@ -25,14 +25,14 @@ func newbodyMarshaler() bodyMarshaler { func (bodyMarshaler) MarshalLogs(ld plog.Logs) ([]byte, error) { buf := bytes.Buffer{} rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) ills := rl.ScopeLogs() - for j := 0; j < ills.Len(); j++ { + for j := range ills.Len() { ils := ills.At(j) logs := ils.LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { lr := logs.At(k) body := lr.Body() buf.WriteString(body.AsString()) diff --git a/exporter/awss3exporter/internal/upload/partition_test.go b/exporter/awss3exporter/internal/upload/partition_test.go index 175388c7c6c77..a4e0c8bf029c4 100644 --- a/exporter/awss3exporter/internal/upload/partition_test.go +++ b/exporter/awss3exporter/internal/upload/partition_test.go @@ -177,7 +177,7 @@ func TestPartitionKeyInputsUniqueKey(t *testing.T) { // is not repeated seen := make(map[string]struct{}) - for i := 0; i < 500; i++ { + for range 500 { uv := (&PartitionKeyBuilder{}).uniqueKey() _, ok := seen[uv] assert.False(t, ok, "Must not have repeated partition key %q", uv) diff --git a/exporter/awss3exporter/sumo_marshaler.go b/exporter/awss3exporter/sumo_marshaler.go index 9ec2c54e64495..a2c397288d27f 100644 --- a/exporter/awss3exporter/sumo_marshaler.go +++ b/exporter/awss3exporter/sumo_marshaler.go @@ -76,7 +76,7 @@ const ( func (sumoMarshaler) MarshalLogs(ld plog.Logs) ([]byte, error) { buf := bytes.Buffer{} rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) ra := rl.Resource().Attributes() sourceCategory, exists := ra.Get(SourceCategoryKey) @@ -119,10 +119,10 @@ func (sumoMarshaler) MarshalLogs(ld plog.Logs) ([]byte, error) { } ills := rl.ScopeLogs() - for j := 0; j < ills.Len(); j++ { + for j := range ills.Len() { ils := ills.At(j) logs := ils.LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { lr := logs.At(k) dateVal := lr.ObservedTimestamp() diff --git a/exporter/awsxrayexporter/awsxray.go b/exporter/awsxrayexporter/awsxray.go index dc01d1d8f9210..74d740d51ed93 100644 --- a/exporter/awsxrayexporter/awsxray.go +++ b/exporter/awsxrayexporter/awsxray.go @@ -99,12 +99,12 @@ func newTracesExporter( func extractResourceSpans(config component.Config, logger *zap.Logger, td ptrace.Traces) []*string { documents := make([]*string, 0, td.SpanCount()) - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { rspans := td.ResourceSpans().At(i) resource := rspans.Resource() - for j := 0; j < rspans.ScopeSpans().Len(); j++ { + for j := range rspans.ScopeSpans().Len() { spans := rspans.ScopeSpans().At(j).Spans() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { documentsForSpan, localErr := translator.MakeSegmentDocuments( spans.At(k), resource, config.(*Config).IndexedAttributes, diff --git a/exporter/awsxrayexporter/awsxray_test.go b/exporter/awsxrayexporter/awsxray_test.go index fbee7121ce28b..48eb8158953c3 100644 --- a/exporter/awsxrayexporter/awsxray_test.go +++ b/exporter/awsxrayexporter/awsxray_test.go @@ -95,7 +95,7 @@ func TestTelemetryEnabled(t *testing.T) { func BenchmarkForTracesExporter(b *testing.B) { traceExporter := initializeTracesExporter(b, generateConfig(b), telemetrytest.NewNopRegistry()) - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() ctx := context.Background() td := 
constructSpanData() diff --git a/exporter/awsxrayexporter/internal/translator/aws.go b/exporter/awsxrayexporter/internal/translator/aws.go index e11bcb721e1c7..be71d7bdb0a4f 100644 --- a/exporter/awsxrayexporter/internal/translator/aws.go +++ b/exporter/awsxrayexporter/internal/translator/aws.go @@ -165,7 +165,7 @@ func makeAws(attributes map[string]pcommon.Value, resource pcommon.Resource, log } else if value.Slice().Len() > 1 { tableName = "" tableNames = []string{} - for i := 0; i < value.Slice().Len(); i++ { + for i := range value.Slice().Len() { tableNames = append(tableNames, value.Slice().At(i).Str()) } } @@ -314,7 +314,7 @@ func normalizeToSlice(v pcommon.Value) pcommon.Slice { // populated, or given an array of just log group names, create the LogGroupMetadata objects with arn omitted func getLogGroupMetadata(logGroups pcommon.Slice, isArn bool) []awsxray.LogGroupMetadata { var lgm []awsxray.LogGroupMetadata - for i := 0; i < logGroups.Len(); i++ { + for i := range logGroups.Len() { if isArn { lgm = append(lgm, awsxray.LogGroupMetadata{ Arn: awsxray.String(logGroups.At(i).Str()), diff --git a/exporter/awsxrayexporter/internal/translator/cause.go b/exporter/awsxrayexporter/internal/translator/cause.go index af1028b887c05..4b3e866a9efd1 100644 --- a/exporter/awsxrayexporter/internal/translator/cause.go +++ b/exporter/awsxrayexporter/internal/translator/cause.go @@ -44,7 +44,7 @@ func makeCause(span ptrace.Span, attributes map[string]pcommon.Value, resource p isAwsSdkSpan := isAwsSdkSpan(span) hasExceptionEvents := false hasAwsIndividualHTTPError := false - for i := 0; i < span.Events().Len(); i++ { + for i := range span.Events().Len() { event := span.Events().At(i) if event.Name() == ExceptionEventName { hasExceptionEvents = true @@ -69,7 +69,7 @@ func makeCause(span ptrace.Span, attributes map[string]pcommon.Value, resource p } var exceptions []awsxray.Exception - for i := 0; i < span.Events().Len(); i++ { + for i := range span.Events().Len() { event := span.Events().At(i) if event.Name() == ExceptionEventName { exceptionType := "" diff --git a/exporter/awsxrayexporter/internal/translator/http.go b/exporter/awsxrayexporter/internal/translator/http.go index 347f77f1cd36f..542e7d67a1a6d 100644 --- a/exporter/awsxrayexporter/internal/translator/http.go +++ b/exporter/awsxrayexporter/internal/translator/http.go @@ -152,7 +152,7 @@ func extractResponseSizeFromEvents(span ptrace.Span) int64 { if size != 0 { return size } - for i := 0; i < span.Events().Len(); i++ { + for i := range span.Events().Len() { event := span.Events().At(i) size = extractResponseSizeFromAttributes(event.Attributes()) if size != 0 { diff --git a/exporter/awsxrayexporter/internal/translator/segment.go b/exporter/awsxrayexporter/internal/translator/segment.go index df94f4ed411e9..6bbfed461ddfe 100644 --- a/exporter/awsxrayexporter/internal/translator/segment.go +++ b/exporter/awsxrayexporter/internal/translator/segment.go @@ -630,7 +630,7 @@ func makeXRayAttributes(attributes map[string]pcommon.Value, resource pcommon.Re annotationKeys, ok := attributes[awsxray.AWSXraySegmentAnnotationsAttribute] if ok && annotationKeys.Type() == pcommon.ValueTypeSlice { slice := annotationKeys.Slice() - for i := 0; i < slice.Len(); i++ { + for i := range slice.Len() { value := slice.At(i) if value.Type() != pcommon.ValueTypeStr { continue diff --git a/exporter/awsxrayexporter/internal/translator/span_links.go b/exporter/awsxrayexporter/internal/translator/span_links.go index c7b3df4734238..3e1f970259dcb 100644 --- 
a/exporter/awsxrayexporter/internal/translator/span_links.go +++ b/exporter/awsxrayexporter/internal/translator/span_links.go @@ -13,7 +13,7 @@ import ( func makeSpanLinks(links ptrace.SpanLinkSlice, skipTimestampValidation bool) ([]awsxray.SpanLinkData, error) { var spanLinkDataArray []awsxray.SpanLinkData - for i := 0; i < links.Len(); i++ { + for i := range links.Len() { var spanLinkData awsxray.SpanLinkData link := links.At(i) diff --git a/exporter/awsxrayexporter/internal/translator/writer_pool_test.go b/exporter/awsxrayexporter/internal/translator/writer_pool_test.go index fb9729bfe540d..16d99a663a69e 100644 --- a/exporter/awsxrayexporter/internal/translator/writer_pool_test.go +++ b/exporter/awsxrayexporter/internal/translator/writer_pool_test.go @@ -37,7 +37,7 @@ func TestWriterPoolBasic(t *testing.T) { func BenchmarkWithoutPool(b *testing.B) { logger := zap.NewNop() - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() span := constructWriterPoolSpan() b.StartTimer() @@ -53,7 +53,7 @@ func BenchmarkWithoutPool(b *testing.B) { func BenchmarkWithPool(b *testing.B) { logger := zap.NewNop() wp := newWriterPool(2048) - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() span := constructWriterPoolSpan() b.StartTimer() diff --git a/exporter/azuredataexplorerexporter/adx_exporter.go b/exporter/azuredataexplorerexporter/adx_exporter.go index b1b76b34e0561..91806d6508eaf 100644 --- a/exporter/azuredataexplorerexporter/adx_exporter.go +++ b/exporter/azuredataexplorerexporter/adx_exporter.go @@ -73,13 +73,13 @@ func (e *adxDataProducer) ingestData(b []string) error { func (e *adxDataProducer) logsDataPusher(_ context.Context, logData plog.Logs) error { resourceLogs := logData.ResourceLogs() var logsBuffer []string - for i := 0; i < resourceLogs.Len(); i++ { + for i := range resourceLogs.Len() { resource := resourceLogs.At(i) scopeLogs := resourceLogs.At(i).ScopeLogs() - for j := 0; j < scopeLogs.Len(); j++ { + for j := range scopeLogs.Len() { scope := scopeLogs.At(j) logs := scopeLogs.At(j).LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { logData := logs.At(k) transformedADXLog := mapToAdxLog(resource.Resource(), scope.Scope(), logData, e.logger) adxLogJSONBytes, err := jsoniter.MarshalToString(transformedADXLog) @@ -101,13 +101,13 @@ func (e *adxDataProducer) logsDataPusher(_ context.Context, logData plog.Logs) e func (e *adxDataProducer) tracesDataPusher(_ context.Context, traceData ptrace.Traces) error { resourceSpans := traceData.ResourceSpans() var spanBuffer []string - for i := 0; i < resourceSpans.Len(); i++ { + for i := range resourceSpans.Len() { resource := resourceSpans.At(i) scopeSpans := resourceSpans.At(i).ScopeSpans() - for j := 0; j < scopeSpans.Len(); j++ { + for j := range scopeSpans.Len() { scope := scopeSpans.At(j) spans := scopeSpans.At(j).Spans() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { spanData := spans.At(k) transformedADXTrace := mapToAdxTrace(resource.Resource(), scope.Scope(), spanData) adxTraceJSONBytes, err := jsoniter.MarshalToString(transformedADXTrace) diff --git a/exporter/azuredataexplorerexporter/adx_exporter_test.go b/exporter/azuredataexplorerexporter/adx_exporter_test.go index 3ea8944ddf0fd..955ff1f6e1b90 100644 --- a/exporter/azuredataexplorerexporter/adx_exporter_test.go +++ b/exporter/azuredataexplorerexporter/adx_exporter_test.go @@ -276,7 +276,7 @@ func createMetricsData(numberOfDataPoints int) pmetric.Metrics { metrics := pmetric.NewMetrics() rm := 
metrics.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().PutStr("k0", "v0") - for i := 0; i < numberOfDataPoints; i++ { + for range numberOfDataPoints { tsUnix := time.Unix(time.Now().Unix(), time.Now().UnixNano()) ilm := rm.ScopeMetrics().AppendEmpty() metric := ilm.Metrics().AppendEmpty() diff --git a/exporter/azuredataexplorerexporter/e2e_test.go b/exporter/azuredataexplorerexporter/e2e_test.go index 9de5207315c55..c5f73c113dda6 100644 --- a/exporter/azuredataexplorerexporter/e2e_test.go +++ b/exporter/azuredataexplorerexporter/e2e_test.go @@ -96,7 +96,7 @@ func TestCreateTracesE2E(t *testing.T) { assert.Fail(t, err.Error()) } // Validate all attributes - for i := 0; i < len(recs); i++ { + for i := range recs { assert.Equal(t, tID, recs[i].TraceID) spanBytes, err := hex.DecodeString(recs[i].SpanID) assert.Equal(t, tID, recs[i].TraceID) @@ -158,7 +158,7 @@ func TestCreateLogsE2E(t *testing.T) { assert.Fail(t, err.Error()) } // Validate all attributes - for i := 0; i < len(recs); i++ { + for i := range recs { crec := recs[i] spanBytes, err := hex.DecodeString(crec.SpanID) assert.Equal(t, tID, crec.TraceID) @@ -220,7 +220,7 @@ func TestCreateMetricsE2E(t *testing.T) { assert.Fail(t, err.Error()) } // Validate all attributes - for i := 0; i < len(recs); i++ { + for i := range recs { crec := recs[i] assert.Equal(t, metricName, crec.MetricName) assert.Equal(t, float64(metricValue), crec.MetricValue) diff --git a/exporter/azuredataexplorerexporter/metricsdata_to_adx.go b/exporter/azuredataexplorerexporter/metricsdata_to_adx.go index a80fc1342d56d..4dc358e6d4834 100644 --- a/exporter/azuredataexplorerexporter/metricsdata_to_adx.go +++ b/exporter/azuredataexplorerexporter/metricsdata_to_adx.go @@ -86,7 +86,7 @@ func mapToAdxMetric(res pcommon.Resource, md pmetric.Metric, scopeattrs map[stri case pmetric.MetricTypeGauge: dataPoints := md.Gauge().DataPoints() adxMetrics := make([]*AdxMetric, dataPoints.Len()) - for gi := 0; gi < dataPoints.Len(); gi++ { + for gi := range dataPoints.Len() { dataPoint := dataPoints.At(gi) adxMetrics[gi] = createMetric(dataPoint.Timestamp().AsTime(), dataPoint.Attributes(), func() float64 { var metricValue float64 @@ -103,7 +103,7 @@ func mapToAdxMetric(res pcommon.Resource, md pmetric.Metric, scopeattrs map[stri case pmetric.MetricTypeHistogram: dataPoints := md.Histogram().DataPoints() var adxMetrics []*AdxMetric - for gi := 0; gi < dataPoints.Len(); gi++ { + for gi := range dataPoints.Len() { dataPoint := dataPoints.At(gi) bounds := dataPoint.ExplicitBounds() counts := dataPoint.BucketCounts() @@ -132,7 +132,7 @@ func mapToAdxMetric(res pcommon.Resource, md pmetric.Metric, scopeattrs map[stri } value := uint64(0) // now create buckets for each bound. 
- for bi := 0; bi < bounds.Len(); bi++ { + for bi := range bounds.Len() { customMap := copyMap(map[string]any{"le": float64ToDimValue(bounds.At(bi))}, dataPoint.Attributes().AsRaw()) value += counts.At(bi) @@ -169,7 +169,7 @@ func mapToAdxMetric(res pcommon.Resource, md pmetric.Metric, scopeattrs map[stri case pmetric.MetricTypeSum: dataPoints := md.Sum().DataPoints() adxMetrics := make([]*AdxMetric, dataPoints.Len()) - for gi := 0; gi < dataPoints.Len(); gi++ { + for gi := range dataPoints.Len() { dataPoint := dataPoints.At(gi) adxMetrics[gi] = createMetric(dataPoint.Timestamp().AsTime(), dataPoint.Attributes(), func() float64 { var metricValue float64 @@ -186,7 +186,7 @@ func mapToAdxMetric(res pcommon.Resource, md pmetric.Metric, scopeattrs map[stri case pmetric.MetricTypeSummary: dataPoints := md.Summary().DataPoints() var adxMetrics []*AdxMetric - for gi := 0; gi < dataPoints.Len(); gi++ { + for gi := range dataPoints.Len() { dataPoint := dataPoints.At(gi) // first, add one event for sum, and one for count { @@ -207,7 +207,7 @@ func mapToAdxMetric(res pcommon.Resource, md pmetric.Metric, scopeattrs map[stri pmetric.MetricTypeSummary)) } // now create values for each quantile. - for bi := 0; bi < dataPoint.QuantileValues().Len(); bi++ { + for bi := range dataPoint.QuantileValues().Len() { dp := dataPoint.QuantileValues().At(bi) quantileName := fmt.Sprintf("%s_%s", md.Name(), strconv.FormatFloat(dp.Quantile(), 'f', -1, 64)) metricQuantile := map[string]any{ @@ -241,15 +241,15 @@ func mapToAdxMetric(res pcommon.Resource, md pmetric.Metric, scopeattrs map[stri func rawMetricsToAdxMetrics(_ context.Context, metrics pmetric.Metrics, logger *zap.Logger) []*AdxMetric { var transformedAdxMetrics []*AdxMetric resourceMetric := metrics.ResourceMetrics() - for i := 0; i < resourceMetric.Len(); i++ { + for i := range resourceMetric.Len() { res := resourceMetric.At(i).Resource() scopeMetrics := resourceMetric.At(i).ScopeMetrics() - for j := 0; j < scopeMetrics.Len(); j++ { + for j := range scopeMetrics.Len() { scopeMetric := scopeMetrics.At(j) metrics := scopeMetric.Metrics() // get details of the scope from the scope metric scopeAttr := getScopeMap(scopeMetric.Scope()) - for k := 0; k < metrics.Len(); k++ { + for k := range metrics.Len() { transformedAdxMetrics = append(transformedAdxMetrics, mapToAdxMetric(res, metrics.At(k), scopeAttr, logger)...) 
 			}
 		}
diff --git a/exporter/azuredataexplorerexporter/tracesdata_to_adx.go b/exporter/azuredataexplorerexporter/tracesdata_to_adx.go
index 8875198342c88..48060a0005b2d 100644
--- a/exporter/azuredataexplorerexporter/tracesdata_to_adx.go
+++ b/exporter/azuredataexplorerexporter/tracesdata_to_adx.go
@@ -71,7 +71,7 @@ func mapToAdxTrace(resource pcommon.Resource, scope pcommon.InstrumentationScope
 func getEventsData(sd ptrace.Span) []*Event {
 	events := make([]*Event, sd.Events().Len())
 
-	for i := 0; i < sd.Events().Len(); i++ {
+	for i := range sd.Events().Len() {
 		event := &Event{
 			Timestamp: sd.Events().At(i).Timestamp().AsTime().Format(time.RFC3339Nano),
 			EventName: sd.Events().At(i).Name(),
@@ -84,7 +84,7 @@ func getEventsData(sd ptrace.Span) []*Event {
 func getLinksData(sd ptrace.Span) []*Link {
 	links := make([]*Link, sd.Links().Len())
 
-	for i := 0; i < sd.Links().Len(); i++ {
+	for i := range sd.Links().Len() {
 		link := &Link{
 			TraceID: traceutil.TraceIDToHexOrEmptyString(sd.Links().At(i).TraceID()),
 			SpanID:  traceutil.SpanIDToHexOrEmptyString(sd.Links().At(i).SpanID()),
diff --git a/exporter/azuremonitorexporter/logexporter.go b/exporter/azuremonitorexporter/logexporter.go
index 1eacc80261b06..08769dccd666e 100644
--- a/exporter/azuremonitorexporter/logexporter.go
+++ b/exporter/azuremonitorexporter/logexporter.go
@@ -22,13 +22,13 @@ func (exporter *logExporter) onLogData(_ context.Context, logData plog.Logs) err
 	resourceLogs := logData.ResourceLogs()
 	logPacker := newLogPacker(exporter.logger)
 
-	for i := 0; i < resourceLogs.Len(); i++ {
+	for i := range resourceLogs.Len() {
 		scopeLogs := resourceLogs.At(i).ScopeLogs()
 		resource := resourceLogs.At(i).Resource()
-		for j := 0; j < scopeLogs.Len(); j++ {
+		for j := range scopeLogs.Len() {
 			logs := scopeLogs.At(j).LogRecords()
 			scope := scopeLogs.At(j).Scope()
-			for k := 0; k < logs.Len(); k++ {
+			for k := range logs.Len() {
 				envelope := logPacker.LogRecordToEnvelope(logs.At(k), resource, scope)
 				envelope.IKey = string(exporter.config.InstrumentationKey)
 				exporter.transportChannel.Send(envelope)
diff --git a/exporter/azuremonitorexporter/metric_to_envelopes.go b/exporter/azuremonitorexporter/metric_to_envelopes.go
index 95f405ce69ca8..5b5e01394b775 100644
--- a/exporter/azuremonitorexporter/metric_to_envelopes.go
+++ b/exporter/azuremonitorexporter/metric_to_envelopes.go
@@ -117,7 +117,7 @@ func newScalarMetric(name string, dataPointSlice pmetric.NumberDataPointSlice) *
 func (m scalarMetric) getTimedDataPoints() []*timedMetricDataPoint {
 	timedDataPoints := make([]*timedMetricDataPoint, m.dataPointSlice.Len())
-	for i := 0; i < m.dataPointSlice.Len(); i++ {
+	for i := range m.dataPointSlice.Len() {
 		numberDataPoint := m.dataPointSlice.At(i)
 		dataPoint := contracts.NewDataPoint()
 		dataPoint.Name = m.name
@@ -154,7 +154,7 @@ func newHistogramMetric(name string, dataPointSlice pmetric.HistogramDataPointSl
 func (m histogramMetric) getTimedDataPoints() []*timedMetricDataPoint {
 	timedDataPoints := make([]*timedMetricDataPoint, m.dataPointSlice.Len())
-	for i := 0; i < m.dataPointSlice.Len(); i++ {
+	for i := range m.dataPointSlice.Len() {
 		histogramDataPoint := m.dataPointSlice.At(i)
 		dataPoint := contracts.NewDataPoint()
 		dataPoint.Name = m.name
@@ -187,7 +187,7 @@ func newExponentialHistogramMetric(name string, dataPointSlice pmetric.Exponenti
 func (m exponentialHistogramMetric) getTimedDataPoints() []*timedMetricDataPoint {
 	timedDataPoints := make([]*timedMetricDataPoint, m.dataPointSlice.Len())
-	for i := 0; i < m.dataPointSlice.Len(); i++ {
+	for i := range m.dataPointSlice.Len() {
 		exponentialHistogramDataPoint := m.dataPointSlice.At(i)
 		dataPoint := contracts.NewDataPoint()
 		dataPoint.Name = m.name
@@ -220,7 +220,7 @@ func newSummaryMetric(name string, dataPointSlice pmetric.SummaryDataPointSlice)
 func (m summaryMetric) getTimedDataPoints() []*timedMetricDataPoint {
 	timedDataPoints := make([]*timedMetricDataPoint, m.dataPointSlice.Len())
-	for i := 0; i < m.dataPointSlice.Len(); i++ {
+	for i := range m.dataPointSlice.Len() {
 		summaryDataPoint := m.dataPointSlice.At(i)
 		dataPoint := contracts.NewDataPoint()
 		dataPoint.Name = m.name
diff --git a/exporter/azuremonitorexporter/metricexporter.go b/exporter/azuremonitorexporter/metricexporter.go
index b0b3bbd2bd1af..f8aaf442e7d8f 100644
--- a/exporter/azuremonitorexporter/metricexporter.go
+++ b/exporter/azuremonitorexporter/metricexporter.go
@@ -22,13 +22,13 @@ type metricExporter struct {
 func (exporter *metricExporter) onMetricData(_ context.Context, metricData pmetric.Metrics) error {
 	resourceMetrics := metricData.ResourceMetrics()
 
-	for i := 0; i < resourceMetrics.Len(); i++ {
+	for i := range resourceMetrics.Len() {
 		scopeMetrics := resourceMetrics.At(i).ScopeMetrics()
 		resource := resourceMetrics.At(i).Resource()
-		for j := 0; j < scopeMetrics.Len(); j++ {
+		for j := range scopeMetrics.Len() {
 			metrics := scopeMetrics.At(j).Metrics()
 			scope := scopeMetrics.At(j).Scope()
-			for k := 0; k < metrics.Len(); k++ {
+			for k := range metrics.Len() {
 				for _, envelope := range exporter.packer.MetricToEnvelopes(metrics.At(k), resource, scope) {
 					envelope.IKey = string(exporter.config.InstrumentationKey)
 					exporter.transportChannel.Send(envelope)
diff --git a/exporter/azuremonitorexporter/trace_to_envelope.go b/exporter/azuremonitorexporter/trace_to_envelope.go
index 7839aa373502e..bc8a930fef6ad 100644
--- a/exporter/azuremonitorexporter/trace_to_envelope.go
+++ b/exporter/azuremonitorexporter/trace_to_envelope.go
@@ -124,7 +124,7 @@ func spanToEnvelopes(
 	envelopes = append(envelopes, envelope)
 
 	// Now add the span events. We always export exception events.
-	for i := 0; i < span.Events().Len(); i++ {
+	for i := range span.Events().Len() {
 		spanEvent := span.Events().At(i)
 		// skip non-exception events if configured
diff --git a/exporter/azuremonitorexporter/traceiteration.go b/exporter/azuremonitorexporter/traceiteration.go
index f3b5a8af5a136..5a6853f4bce67 100644
--- a/exporter/azuremonitorexporter/traceiteration.go
+++ b/exporter/azuremonitorexporter/traceiteration.go
@@ -25,12 +25,12 @@ func accept(traces ptrace.Traces, v TraceVisitor) {
 	resourceSpans := traces.ResourceSpans()
 
 	// Walk each ResourceSpans instance
-	for i := 0; i < resourceSpans.Len(); i++ {
+	for i := range resourceSpans.Len() {
 		rs := resourceSpans.At(i)
 		resource := rs.Resource()
 		scopeSpansSlice := rs.ScopeSpans()
 
-		for j := 0; j < scopeSpansSlice.Len(); j++ {
+		for j := range scopeSpansSlice.Len() {
 			scopeSpans := scopeSpansSlice.At(j)
 			// instrumentation library is optional
 			scope := scopeSpans.Scope()
@@ -39,7 +39,7 @@ func accept(traces ptrace.Traces, v TraceVisitor) {
 				continue
 			}
 
-			for k := 0; k < spansSlice.Len(); k++ {
+			for k := range spansSlice.Len() {
 				if ok := v.visit(resource, scope, spansSlice.At(k)); !ok {
 					return
 				}
diff --git a/exporter/bmchelixexporter/internal/operationsmanagement/metrics_producer.go b/exporter/bmchelixexporter/internal/operationsmanagement/metrics_producer.go
index a5e7df4b8ea59..1ce33b53c10f7 100644
--- a/exporter/bmchelixexporter/internal/operationsmanagement/metrics_producer.go
+++ b/exporter/bmchelixexporter/internal/operationsmanagement/metrics_producer.go
@@ -43,7 +43,7 @@ func (mp *MetricsProducer) ProduceHelixPayload(metrics pmetric.Metrics) ([]BMCHe
 	// Iterate through each pmetric.ResourceMetrics instance
 	rmetrics := metrics.ResourceMetrics()
-	for i := 0; i < rmetrics.Len(); i++ {
+	for i := range rmetrics.Len() {
 		resourceMetric := rmetrics.At(i)
 		resource := resourceMetric.Resource()
@@ -52,12 +52,12 @@ func (mp *MetricsProducer) ProduceHelixPayload(metrics pmetric.Metrics) ([]BMCHe
 		// Iterate through each pmetric.ScopeMetrics within the pmetric.ResourceMetrics instance
 		scopeMetrics := resourceMetric.ScopeMetrics()
-		for j := 0; j < scopeMetrics.Len(); j++ {
+		for j := range scopeMetrics.Len() {
 			scopeMetric := scopeMetrics.At(j)
 
 			// Iterate through each individual pmetric.Metric instance
 			metrics := scopeMetric.Metrics()
-			for k := 0; k < metrics.Len(); k++ {
+			for k := range metrics.Len() {
 				metric := metrics.At(k)
 
 				// Create the payload for each metric
@@ -136,12 +136,12 @@ func (mp *MetricsProducer) createHelixMetric(metric pmetric.Metric, resourceAttr
 	switch metric.Type() {
 	case pmetric.MetricTypeSum:
 		dataPoints := metric.Sum().DataPoints()
-		for i := 0; i < dataPoints.Len(); i++ {
+		for i := range dataPoints.Len() {
 			samples = mp.processDatapoint(samples, dataPoints.At(i), labels, metric, resourceAttrs)
 		}
 	case pmetric.MetricTypeGauge:
 		dataPoints := metric.Gauge().DataPoints()
-		for i := 0; i < dataPoints.Len(); i++ {
+		for i := range dataPoints.Len() {
 			samples = mp.processDatapoint(samples, dataPoints.At(i), labels, metric, resourceAttrs)
 		}
 	default:
diff --git a/exporter/carbonexporter/exporter_test.go b/exporter/carbonexporter/exporter_test.go
index eeeddb1e5a8fb..eea3b057d28f8 100644
--- a/exporter/carbonexporter/exporter_test.go
+++ b/exporter/carbonexporter/exporter_test.go
@@ -137,11 +137,11 @@ func TestConsumeMetrics(t *testing.T) {
 			startCh := make(chan struct{})
 			var writersWG sync.WaitGroup
 			writersWG.Add(tt.numProducers)
-			for i := 0; i < tt.numProducers; i++ {
+			for range tt.numProducers {
 				go func() {
 					defer writersWG.Done()
 					<-startCh
-					for j := 0; j < tt.writesPerProducer; j++ {
+					for range tt.writesPerProducer {
 						assert.NoError(t, exp.ConsumeMetrics(context.Background(), tt.md))
 					}
 				}()
@@ -240,7 +240,7 @@ func TestConnPoolWithIdleMaxConnections(t *testing.T) {
 	// Create connections and
 	var conns []net.Conn
-	for i := 0; i < maxIdleConns; i++ {
+	for i := range maxIdleConns {
 		conn, err := cp.get()
 		require.NoError(t, err)
 		conns = append(conns, conn)
@@ -252,7 +252,7 @@
 		cp.put(conn)
 	}
 
-	for i := 0; i < maxIdleConns+1; i++ {
+	for i := range maxIdleConns + 1 {
 		conn, err := cp.get()
 		require.NoError(t, err)
 		_, err = conn.Write([]byte(metricDataToPlaintext(generateSmallBatch())))
@@ -289,7 +289,7 @@ func generateMetricsBatch(size int) pmetric.Metrics {
 	rm.Resource().Attributes().PutStr(conventions.AttributeServiceName, "carbon")
 	ms := rm.ScopeMetrics().AppendEmpty().Metrics()
-	for i := 0; i < size; i++ {
+	for i := range size {
 		m := ms.AppendEmpty()
 		m.SetName("test_" + strconv.Itoa(i))
 		dp := m.SetEmptyGauge().DataPoints().AppendEmpty()
diff --git a/exporter/carbonexporter/metricdata_to_plaintext.go b/exporter/carbonexporter/metricdata_to_plaintext.go
index 29d5f1d5b40eb..9c03629b74cf1 100644
--- a/exporter/carbonexporter/metricdata_to_plaintext.go
+++ b/exporter/carbonexporter/metricdata_to_plaintext.go
@@ -88,11 +88,11 @@ func metricDataToPlaintext(md pmetric.Metrics) string {
 	buf.Reset()
 	defer writerPool.Put(buf)
 
-	for i := 0; i < md.ResourceMetrics().Len(); i++ {
+	for i := range md.ResourceMetrics().Len() {
 		rm := md.ResourceMetrics().At(i)
-		for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+		for j := range rm.ScopeMetrics().Len() {
 			sm := rm.ScopeMetrics().At(j)
-			for k := 0; k < sm.Metrics().Len(); k++ {
+			for k := range sm.Metrics().Len() {
 				metric := sm.Metrics().At(k)
 				if metric.Name() == "" {
 					// TODO: log error info
@@ -116,7 +116,7 @@
 }
 
 func writeNumberDataPoints(buf *bytes.Buffer, metricName string, dps pmetric.NumberDataPointSlice) {
-	for i := 0; i < dps.Len(); i++ {
+	for i := range dps.Len() {
 		dp := dps.At(i)
 		var valueStr string
 		switch dp.ValueType() {
@@ -154,7 +154,7 @@ func formatHistogramDataPoints(
 	metricName string,
 	dps pmetric.HistogramDataPointSlice,
 ) {
-	for i := 0; i < dps.Len(); i++ {
+	for i := range dps.Len() {
 		dp := dps.At(i)
 		timestampStr := formatTimestamp(dp.Timestamp())
@@ -165,13 +165,13 @@
 		bounds := dp.ExplicitBounds().AsRaw()
 		carbonBounds := make([]string, len(bounds)+1)
-		for i := 0; i < len(bounds); i++ {
+		for i := range bounds {
 			carbonBounds[i] = formatFloatForLabel(bounds[i])
 		}
 		carbonBounds[len(carbonBounds)-1] = infinityCarbonValue
 
 		bucketPath := buildPath(metricName+distributionBucketSuffix, dp.Attributes())
-		for j := 0; j < dp.BucketCounts().Len(); j++ {
+		for j := range dp.BucketCounts().Len() {
 			writeLine(
 				buf,
 				bucketPath+distributionUpperBoundTagBeforeValue+carbonBounds[j],
@@ -198,7 +198,7 @@ func formatSummaryDataPoints(
 	metricName string,
 	dps pmetric.SummaryDataPointSlice,
 ) {
-	for i := 0; i < dps.Len(); i++ {
+	for i := range dps.Len() {
 		dp := dps.At(i)
 		timestampStr := formatTimestamp(dp.Timestamp())
@@ -209,7 +209,7 @@
 		}
 
 		quantilePath := buildPath(metricName+summaryQuantileSuffix, dp.Attributes())
-		for j := 0; j < dp.QuantileValues().Len(); j++ {
+		for j := range dp.QuantileValues().Len() {
 			writeLine(
 				buf,
 				quantilePath+summaryQuantileTagBeforeValue+formatFloatForLabel(dp.QuantileValues().At(j).Quantile()*100),
diff --git a/exporter/carbonexporter/metricdata_to_plaintext_test.go b/exporter/carbonexporter/metricdata_to_plaintext_test.go
index b1910334c3aaa..9c4b8fd43e496 100644
--- a/exporter/carbonexporter/metricdata_to_plaintext_test.go
+++ b/exporter/carbonexporter/metricdata_to_plaintext_test.go
@@ -327,7 +327,7 @@ func BenchmarkConsumeMetricsDefault(b *testing.B) {
 	md := generateSmallBatch()
 	b.ResetTimer()
 	b.ReportAllocs()
-	for n := 0; n < b.N; n++ {
+	for range b.N {
 		assert.Len(b, metricDataToPlaintext(md), 62)
 	}
 }
diff --git a/exporter/cassandraexporter/exporter_logs.go b/exporter/cassandraexporter/exporter_logs.go
index b9c2b66032c06..eaaaf4d6e4ccf 100644
--- a/exporter/cassandraexporter/exporter_logs.go
+++ b/exporter/cassandraexporter/exporter_logs.go
@@ -110,14 +110,14 @@ func parseCreateLogTableSQL(cfg *Config) string {
 func (e *logsExporter) pushLogsData(ctx context.Context, ld plog.Logs) error {
 	start := time.Now()
 
-	for i := 0; i < ld.ResourceLogs().Len(); i++ {
+	for i := range ld.ResourceLogs().Len() {
 		logs := ld.ResourceLogs().At(i)
 		res := logs.Resource()
 		resAttr := attributesToMap(res.Attributes().AsRaw())
-		for j := 0; j < logs.ScopeLogs().Len(); j++ {
+		for j := range logs.ScopeLogs().Len() {
 			rs := logs.ScopeLogs().At(j).LogRecords()
-			for k := 0; k < rs.Len(); k++ {
+			for k := range rs.Len() {
 				r := rs.At(k)
 				logAttr := attributesToMap(r.Attributes().AsRaw())
 				bodyByte, err := json.Marshal(r.Body().AsRaw())
diff --git a/exporter/cassandraexporter/exporter_traces.go b/exporter/cassandraexporter/exporter_traces.go
index 9e39ba347df46..edcc8ceec5c07 100644
--- a/exporter/cassandraexporter/exporter_traces.go
+++ b/exporter/cassandraexporter/exporter_traces.go
@@ -109,14 +109,14 @@ func (e *tracesExporter) Shutdown(_ context.Context) error {
 func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) error {
 	start := time.Now()
 
-	for i := 0; i < td.ResourceSpans().Len(); i++ {
+	for i := range td.ResourceSpans().Len() {
 		spans := td.ResourceSpans().At(i)
 		res := spans.Resource()
 		resAttr := attributesToMap(res.Attributes().AsRaw())
-		for j := 0; j < spans.ScopeSpans().Len(); j++ {
+		for j := range spans.ScopeSpans().Len() {
 			rs := spans.ScopeSpans().At(j).Spans()
-			for k := 0; k < rs.Len(); k++ {
+			for k := range rs.Len() {
 				r := rs.At(k)
 				spanAttr := attributesToMap(r.Attributes().AsRaw())
 				status := r.Status()
diff --git a/exporter/clickhouseexporter/exporter_logs.go b/exporter/clickhouseexporter/exporter_logs.go
index ba7c8b97c480d..a4e20a79076f5 100644
--- a/exporter/clickhouseexporter/exporter_logs.go
+++ b/exporter/clickhouseexporter/exporter_logs.go
@@ -71,21 +71,21 @@ func (e *logsExporter) pushLogsData(ctx context.Context, ld plog.Logs) error {
 		_ = statement.Close()
 	}()
 
-	for i := 0; i < ld.ResourceLogs().Len(); i++ {
+	for i := range ld.ResourceLogs().Len() {
 		logs := ld.ResourceLogs().At(i)
 		res := logs.Resource()
 		resURL := logs.SchemaUrl()
 		resAttr := internal.AttributesToMap(res.Attributes())
 		serviceName := internal.GetServiceName(res.Attributes())
-		for j := 0; j < logs.ScopeLogs().Len(); j++ {
+		for j := range logs.ScopeLogs().Len() {
 			rs := logs.ScopeLogs().At(j).LogRecords()
 			scopeURL := logs.ScopeLogs().At(j).SchemaUrl()
 			scopeName := logs.ScopeLogs().At(j).Scope().Name()
 			scopeVersion := logs.ScopeLogs().At(j).Scope().Version()
 			scopeAttr := internal.AttributesToMap(logs.ScopeLogs().At(j).Scope().Attributes())
-			for k := 0; k < rs.Len(); k++ {
+			for k := range rs.Len() {
 				r := rs.At(k)
 				timestamp := r.Timestamp()
diff --git a/exporter/clickhouseexporter/exporter_logs_test.go b/exporter/clickhouseexporter/exporter_logs_test.go
index 39e798dfd0031..e867305a68a66 100644
--- a/exporter/clickhouseexporter/exporter_logs_test.go
+++ b/exporter/clickhouseexporter/exporter_logs_test.go
@@ -189,7 +189,7 @@ func simpleLogs(count int) plog.Logs {
 	sl.Scope().SetVersion("1.0.0")
 	sl.Scope().Attributes().PutStr("lib", "clickhouse")
 	timestamp := time.Unix(1703498029, 0)
-	for i := 0; i < count; i++ {
+	for i := range count {
 		r := sl.LogRecords().AppendEmpty()
 		r.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
 		r.SetObservedTimestamp(pcommon.NewTimestampFromTime(timestamp))
@@ -215,7 +215,7 @@ func simpleLogsWithNoTimestamp(count int) plog.Logs {
 	sl.Scope().SetVersion("1.0.0")
 	sl.Scope().Attributes().PutStr("lib", "clickhouse")
 	timestamp := time.Unix(1703498029, 0)
-	for i := 0; i < count; i++ {
+	for i := range count {
 		r := sl.LogRecords().AppendEmpty()
 		r.SetObservedTimestamp(pcommon.NewTimestampFromTime(timestamp))
 		r.SetSeverityNumber(plog.SeverityNumberError2)
@@ -239,7 +239,7 @@ func multipleLogsWithDifferentServiceName(count int) plog.Logs {
 	sl.Scope().SetVersion("1.0.0")
 	sl.Scope().Attributes().PutStr("lib", "clickhouse")
 	timestamp := time.Unix(1703498029, 0)
-	for i := 0; i < count; i++ {
+	for i := range count {
 		r := sl.LogRecords().AppendEmpty()
 		r.SetObservedTimestamp(pcommon.NewTimestampFromTime(timestamp))
 		r.SetSeverityNumber(plog.SeverityNumberError2)
diff --git a/exporter/clickhouseexporter/exporter_metrics.go b/exporter/clickhouseexporter/exporter_metrics.go
index be5696a01855b..5a559fd8978bd 100644
--- a/exporter/clickhouseexporter/exporter_metrics.go
+++ b/exporter/clickhouseexporter/exporter_metrics.go
@@ -75,14 +75,14 @@ func (e *metricsExporter) shutdown(_ context.Context) error {
 func (e *metricsExporter) pushMetricsData(ctx context.Context, md pmetric.Metrics) error {
 	metricsMap := internal.NewMetricsModel(e.tablesConfig)
-	for i := 0; i < md.ResourceMetrics().Len(); i++ {
+	for i := range md.ResourceMetrics().Len() {
 		metrics := md.ResourceMetrics().At(i)
 		resAttr := metrics.Resource().Attributes()
-		for j := 0; j < metrics.ScopeMetrics().Len(); j++ {
+		for j := range metrics.ScopeMetrics().Len() {
 			rs := metrics.ScopeMetrics().At(j).Metrics()
 			scopeInstr := metrics.ScopeMetrics().At(j).Scope()
 			scopeURL := metrics.ScopeMetrics().At(j).SchemaUrl()
-			for k := 0; k < rs.Len(); k++ {
+			for k := range rs.Len() {
 				r := rs.At(k)
 				var errs error
 				//exhaustive:enforce
diff --git a/exporter/clickhouseexporter/exporter_metrics_test.go b/exporter/clickhouseexporter/exporter_metrics_test.go
index f875752e69e9e..d0ac5c2509f5e 100644
--- a/exporter/clickhouseexporter/exporter_metrics_test.go
+++ b/exporter/clickhouseexporter/exporter_metrics_test.go
@@ -164,7 +164,7 @@ func Benchmark_pushMetricsData(b *testing.B) {
 	exporter := newTestMetricsExporter(&testing.T{}, defaultEndpoint)
 	b.ReportAllocs()
 	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for range b.N {
 		err := exporter.pushMetricsData(context.TODO(), pm)
 		require.NoError(b, err)
 	}
@@ -185,7 +185,7 @@ func simpleMetrics(count int) pmetric.Metrics {
 	sm.Scope().SetName("Scope name 1")
 	sm.Scope().SetVersion("Scope version 1")
 	timestamp := time.Unix(1703498029, 0)
-	for i := 0; i < count; i++ {
+	for i := range count {
 		// gauge
 		m := sm.Metrics().AppendEmpty()
 		m.SetName("gauge metrics")
@@ -304,7 +304,7 @@ func simpleMetrics(count int) pmetric.Metrics {
 	sm.Scope().SetDroppedAttributesCount(20)
sm.Scope().SetName("Scope name 2") sm.Scope().SetVersion("Scope version 2") - for i := 0; i < count; i++ { + for i := range count { // gauge m := sm.Metrics().AppendEmpty() m.SetName("gauge metrics") @@ -411,7 +411,7 @@ func simpleMetrics(count int) pmetric.Metrics { sm.Scope().SetDroppedAttributesCount(20) sm.Scope().SetName("Scope name 3") sm.Scope().SetVersion("Scope version 3") - for i := 0; i < count; i++ { + for i := range count { // gauge m := sm.Metrics().AppendEmpty() m.SetName("gauge metrics") diff --git a/exporter/clickhouseexporter/exporter_traces.go b/exporter/clickhouseexporter/exporter_traces.go index 277a60e9b4370..8c0662e0675c2 100644 --- a/exporter/clickhouseexporter/exporter_traces.go +++ b/exporter/clickhouseexporter/exporter_traces.go @@ -72,17 +72,17 @@ func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) er defer func() { _ = statement.Close() }() - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { spans := td.ResourceSpans().At(i) res := spans.Resource() resAttr := internal.AttributesToMap(res.Attributes()) serviceName := internal.GetServiceName(res.Attributes()) - for j := 0; j < spans.ScopeSpans().Len(); j++ { + for j := range spans.ScopeSpans().Len() { rs := spans.ScopeSpans().At(j).Spans() scopeName := spans.ScopeSpans().At(j).Scope().Name() scopeVersion := spans.ScopeSpans().At(j).Scope().Version() - for k := 0; k < rs.Len(); k++ { + for k := range rs.Len() { r := rs.At(k) spanAttr := internal.AttributesToMap(r.Attributes()) status := r.Status() @@ -127,7 +127,7 @@ func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) er } func convertEvents(events ptrace.SpanEventSlice) (times []time.Time, names []string, attrs []column.IterableOrderedMap) { - for i := 0; i < events.Len(); i++ { + for i := range events.Len() { event := events.At(i) times = append(times, event.Timestamp().AsTime()) names = append(names, event.Name()) @@ -137,7 +137,7 @@ func convertEvents(events ptrace.SpanEventSlice) (times []time.Time, names []str } func convertLinks(links ptrace.SpanLinkSlice) (traceIDs []string, spanIDs []string, states []string, attrs []column.IterableOrderedMap) { - for i := 0; i < links.Len(); i++ { + for i := range links.Len() { link := links.At(i) traceIDs = append(traceIDs, traceutil.TraceIDToHexOrEmptyString(link.TraceID())) spanIDs = append(spanIDs, traceutil.SpanIDToHexOrEmptyString(link.SpanID())) diff --git a/exporter/clickhouseexporter/exporter_traces_test.go b/exporter/clickhouseexporter/exporter_traces_test.go index dfa76956b2d14..9b4a9861bf1ed 100644 --- a/exporter/clickhouseexporter/exporter_traces_test.go +++ b/exporter/clickhouseexporter/exporter_traces_test.go @@ -70,7 +70,7 @@ func simpleTraces(count int) ptrace.Traces { ss.Scope().SetDroppedAttributesCount(20) ss.Scope().Attributes().PutStr("lib", "clickhouse") timestamp := time.Unix(1703498029, 0) - for i := 0; i < count; i++ { + for i := range count { s := ss.Spans().AppendEmpty() s.SetTraceID([16]byte{1, 2, 3, byte(i)}) s.SetSpanID([8]byte{1, 2, 3, byte(i)}) diff --git a/exporter/clickhouseexporter/internal/exponential_histogram_metrics.go b/exporter/clickhouseexporter/internal/exponential_histogram_metrics.go index e74e6456a70af..c89e8f3447c1e 100644 --- a/exporter/clickhouseexporter/internal/exponential_histogram_metrics.go +++ b/exporter/clickhouseexporter/internal/exponential_histogram_metrics.go @@ -133,7 +133,7 @@ func (e *expHistogramMetrics) insert(ctx context.Context, db *sql.DB) error { scopeAttr := 
AttributesToMap(model.metadata.ScopeInstr.Attributes()) serviceName := GetServiceName(model.metadata.ResAttr) - for i := 0; i < model.expHistogram.DataPoints().Len(); i++ { + for i := range model.expHistogram.DataPoints().Len() { dp := model.expHistogram.DataPoints().At(i) attrs, times, values, traceIDs, spanIDs := convertExemplars(dp.Exemplars()) _, err = statement.ExecContext(ctx, diff --git a/exporter/clickhouseexporter/internal/gauge_metrics.go b/exporter/clickhouseexporter/internal/gauge_metrics.go index a0fbb4d275f1e..1e8745b76f095 100644 --- a/exporter/clickhouseexporter/internal/gauge_metrics.go +++ b/exporter/clickhouseexporter/internal/gauge_metrics.go @@ -112,7 +112,7 @@ func (g *gaugeMetrics) insert(ctx context.Context, db *sql.DB) error { scopeAttr := AttributesToMap(model.metadata.ScopeInstr.Attributes()) serviceName := GetServiceName(model.metadata.ResAttr) - for i := 0; i < model.gauge.DataPoints().Len(); i++ { + for i := range model.gauge.DataPoints().Len() { dp := model.gauge.DataPoints().At(i) attrs, times, values, traceIDs, spanIDs := convertExemplars(dp.Exemplars()) _, err = statement.ExecContext(ctx, diff --git a/exporter/clickhouseexporter/internal/histogram_metrics.go b/exporter/clickhouseexporter/internal/histogram_metrics.go index cdd4508722e8e..962c67c595753 100644 --- a/exporter/clickhouseexporter/internal/histogram_metrics.go +++ b/exporter/clickhouseexporter/internal/histogram_metrics.go @@ -124,7 +124,7 @@ func (h *histogramMetrics) insert(ctx context.Context, db *sql.DB) error { scopeAttr := AttributesToMap(model.metadata.ScopeInstr.Attributes()) serviceName := GetServiceName(model.metadata.ResAttr) - for i := 0; i < model.histogram.DataPoints().Len(); i++ { + for i := range model.histogram.DataPoints().Len() { dp := model.histogram.DataPoints().At(i) attrs, times, values, traceIDs, spanIDs := convertExemplars(dp.Exemplars()) _, err = statement.ExecContext(ctx, diff --git a/exporter/clickhouseexporter/internal/metrics_model.go b/exporter/clickhouseexporter/internal/metrics_model.go index 7c9377e83fe1f..494e3e581a6d2 100644 --- a/exporter/clickhouseexporter/internal/metrics_model.go +++ b/exporter/clickhouseexporter/internal/metrics_model.go @@ -119,7 +119,7 @@ func convertExemplars(exemplars pmetric.ExemplarSlice) (clickhouse.ArraySet, cli traceIDs clickhouse.ArraySet spanIDs clickhouse.ArraySet ) - for i := 0; i < exemplars.Len(); i++ { + for i := range exemplars.Len() { exemplar := exemplars.At(i) attrs = append(attrs, AttributesToMap(exemplar.FilteredAttributes())) times = append(times, exemplar.Timestamp().AsTime()) @@ -198,7 +198,7 @@ func convertValueAtQuantile(valueAtQuantile pmetric.SummaryDataPointValueAtQuant quantiles clickhouse.ArraySet values clickhouse.ArraySet ) - for i := 0; i < valueAtQuantile.Len(); i++ { + for i := range valueAtQuantile.Len() { value := valueAtQuantile.At(i) quantiles = append(quantiles, value.Quantile()) values = append(values, value.Value()) @@ -225,7 +225,7 @@ func doWithTx(ctx context.Context, db *sql.DB, fn func(tx *sql.Tx) error) error func newPlaceholder(count int) *string { var b strings.Builder - for i := 0; i < count; i++ { + for range count { b.WriteString(",?") } b.WriteString("),") diff --git a/exporter/clickhouseexporter/internal/sum_metrics.go b/exporter/clickhouseexporter/internal/sum_metrics.go index 28e5d553a18d5..eb7254306a573 100644 --- a/exporter/clickhouseexporter/internal/sum_metrics.go +++ b/exporter/clickhouseexporter/internal/sum_metrics.go @@ -116,7 +116,7 @@ func (s *sumMetrics) insert(ctx 
context.Context, db *sql.DB) error { scopeAttr := AttributesToMap(model.metadata.ScopeInstr.Attributes()) serviceName := GetServiceName(model.metadata.ResAttr) - for i := 0; i < model.sum.DataPoints().Len(); i++ { + for i := range model.sum.DataPoints().Len() { dp := model.sum.DataPoints().At(i) attrs, times, values, traceIDs, spanIDs := convertExemplars(dp.Exemplars()) _, err = statement.ExecContext(ctx, diff --git a/exporter/clickhouseexporter/internal/summary_metrics.go b/exporter/clickhouseexporter/internal/summary_metrics.go index 749445ec427c1..7e2cc855e336d 100644 --- a/exporter/clickhouseexporter/internal/summary_metrics.go +++ b/exporter/clickhouseexporter/internal/summary_metrics.go @@ -106,7 +106,7 @@ func (s *summaryMetrics) insert(ctx context.Context, db *sql.DB) error { scopeAttr := AttributesToMap(model.metadata.ScopeInstr.Attributes()) serviceName := GetServiceName(model.metadata.ResAttr) - for i := 0; i < model.summary.DataPoints().Len(); i++ { + for i := range model.summary.DataPoints().Len() { dp := model.summary.DataPoints().At(i) quantiles, values := convertValueAtQuantile(dp.QuantileValues()) diff --git a/exporter/coralogixexporter/logs_client.go b/exporter/coralogixexporter/logs_client.go index 99c66c2002553..657f089d03239 100644 --- a/exporter/coralogixexporter/logs_client.go +++ b/exporter/coralogixexporter/logs_client.go @@ -80,7 +80,7 @@ func (e *logsExporter) shutdown(context.Context) error { func (e *logsExporter) pushLogs(ctx context.Context, ld plog.Logs) error { rss := ld.ResourceLogs() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { resourceLog := rss.At(i) appName, subsystem := e.config.getMetadataFromResource(resourceLog.Resource()) resourceLog.Resource().Attributes().PutStr(cxAppNameAttrName, appName) diff --git a/exporter/coralogixexporter/metrics_client.go b/exporter/coralogixexporter/metrics_client.go index 64aa5d6ba88fc..6969f9a07d4f1 100644 --- a/exporter/coralogixexporter/metrics_client.go +++ b/exporter/coralogixexporter/metrics_client.go @@ -78,7 +78,7 @@ func (e *metricsExporter) start(ctx context.Context, host component.Host) (err e func (e *metricsExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { rss := md.ResourceMetrics() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { resourceMetric := rss.At(i) appName, subsystem := e.config.getMetadataFromResource(resourceMetric.Resource()) resourceMetric.Resource().Attributes().PutStr(cxAppNameAttrName, appName) diff --git a/exporter/coralogixexporter/traces_client.go b/exporter/coralogixexporter/traces_client.go index 2b5665624687e..bbfd3911c32d0 100644 --- a/exporter/coralogixexporter/traces_client.go +++ b/exporter/coralogixexporter/traces_client.go @@ -75,7 +75,7 @@ func (e *tracesExporter) start(ctx context.Context, host component.Host) (err er func (e *tracesExporter) pushTraces(ctx context.Context, td ptrace.Traces) error { rss := td.ResourceSpans() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { resourceSpan := rss.At(i) appName, subsystem := e.config.getMetadataFromResource(resourceSpan.Resource()) resourceSpan.Resource().Attributes().PutStr(cxAppNameAttrName, appName) diff --git a/exporter/datadogexporter/factory.go b/exporter/datadogexporter/factory.go index 7361334fd5156..fea7b0107b98d 100644 --- a/exporter/datadogexporter/factory.go +++ b/exporter/datadogexporter/factory.go @@ -200,7 +200,7 @@ func checkAndCastConfig(c component.Config, logger *zap.Logger) *Config { } func (f *factory) consumeStatsPayload(ctx 
context.Context, wg *sync.WaitGroup, statsIn <-chan []byte, statsWriter *writer.DatadogStatsWriter, tracerVersion string, agentVersion string, logger *zap.Logger) { - for i := 0; i < runtime.NumCPU(); i++ { + for range runtime.NumCPU() { wg.Add(1) go func() { defer wg.Done() @@ -294,7 +294,7 @@ func (f *factory) createMetricsExporter( }) // Consume resources for host metadata - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { res := md.ResourceMetrics().At(i).Resource() consumeResource(metadataReporter, res, set.Logger) } @@ -402,7 +402,7 @@ func (f *factory) createTracesExporter( go hostmetadata.RunPusher(ctx, set, pcfg, hostProvider, attrs, metadataReporter) }) // Consume resources for host metadata - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { res := td.ResourceSpans().At(i).Resource() consumeResource(metadataReporter, res, set.Logger) } @@ -497,7 +497,7 @@ func (f *factory) createLogsExporter( attrs := pcommon.NewMap() go hostmetadata.RunPusher(ctx, set, pcfg, hostProvider, attrs, metadataReporter) }) - for i := 0; i < td.ResourceLogs().Len(); i++ { + for i := range td.ResourceLogs().Len() { res := td.ResourceLogs().At(i).Resource() consumeResource(metadataReporter, res, set.Logger) } diff --git a/exporter/datadogexporter/integrationtest/integration_test.go b/exporter/datadogexporter/integrationtest/integration_test.go index d025e98f7bb91..ee626d30d1839 100644 --- a/exporter/datadogexporter/integrationtest/integration_test.go +++ b/exporter/datadogexporter/integrationtest/integration_test.go @@ -255,7 +255,7 @@ func sendTraces(t *testing.T, endpoint string) { }() tracer := otel.Tracer("test-tracer") - for i := 0; i < 10; i++ { + for i := range 10 { _, span := tracer.Start(ctx, fmt.Sprintf("TestSpan%d", i), apitrace.WithSpanKind(apitrace.SpanKindClient)) if i == 3 { @@ -420,7 +420,7 @@ func sendTracesComputeTopLevelBySpanKind(t *testing.T, endpoint string) { }() tracer := otel.Tracer("test-tracer") - for i := 0; i < 10; i++ { + for i := range 10 { var spanKind apitrace.SpanKind switch i { case 0, 1: diff --git a/exporter/datadogexporter/internal/metrics/sketches/sketches_test.go b/exporter/datadogexporter/internal/metrics/sketches/sketches_test.go index 1735636a5004e..058093fcedfec 100644 --- a/exporter/datadogexporter/internal/metrics/sketches/sketches_test.go +++ b/exporter/datadogexporter/internal/metrics/sketches/sketches_test.go @@ -18,7 +18,7 @@ import ( func makesketch(n int) *quantile.Sketch { s, c := &quantile.Sketch{}, quantile.Default() - for i := 0; i < n; i++ { + for i := range n { s.Insert(c, float64(i)) } return s @@ -38,7 +38,7 @@ func Makeseries(i int) SketchSeries { } // We create i+5 Sketch Points to ensure all hosts have at least 5 Sketch Points for tests - for j := 0; j < i+5; j++ { + for j := range i + 5 { ss.Points = append(ss.Points, SketchPoint{ Ts: 10 * int64(j), Sketch: makesketch(j), diff --git a/exporter/datadogexporter/logs_exporter.go b/exporter/datadogexporter/logs_exporter.go index 540ee7aaf8bf6..66b5c7deb2fb2 100644 --- a/exporter/datadogexporter/logs_exporter.go +++ b/exporter/datadogexporter/logs_exporter.go @@ -126,7 +126,7 @@ func (exp *logsExporter) consumeLogs(ctx context.Context, ld plog.Logs) (err err }) // Consume resources for host metadata - for i := 0; i < ld.ResourceLogs().Len(); i++ { + for i := range ld.ResourceLogs().Len() { res := ld.ResourceLogs().At(i).Resource() consumeResource(exp.metadataReporter, res, exp.params.Logger) } diff --git 
a/exporter/datadogexporter/metrics_exporter.go b/exporter/datadogexporter/metrics_exporter.go index d7a7369b7bf1f..adfc143739530 100644 --- a/exporter/datadogexporter/metrics_exporter.go +++ b/exporter/datadogexporter/metrics_exporter.go @@ -177,7 +177,7 @@ func (exp *metricsExporter) PushMetricsData(ctx context.Context, md pmetric.Metr }) // Consume resources for host metadata - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { res := md.ResourceMetrics().At(i).Resource() consumeResource(exp.metadataReporter, res, exp.params.Logger) } diff --git a/exporter/datadogexporter/traces_exporter.go b/exporter/datadogexporter/traces_exporter.go index 4223fc5c88b3f..8279bca7f80b0 100644 --- a/exporter/datadogexporter/traces_exporter.go +++ b/exporter/datadogexporter/traces_exporter.go @@ -125,7 +125,7 @@ func (exp *traceExporter) consumeTraces( }) // Consume resources for host metadata - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { res := td.ResourceSpans().At(i).Resource() consumeResource(exp.metadataReporter, res, exp.params.Logger) } @@ -137,7 +137,7 @@ func (exp *traceExporter) consumeTraces( if noAPMStatsFeatureGate.IsEnabled() { header[headerComputedStats] = []string{"true"} } - for i := 0; i < rspans.Len(); i++ { + for i := range rspans.Len() { rspan := rspans.At(i) src := exp.agent.OTLPReceiver.ReceiveResourceSpans(ctx, rspan, header, exp.gatewayUsage) switch src.Kind { diff --git a/exporter/datasetexporter/logs_exporter.go b/exporter/datasetexporter/logs_exporter.go index f1425c0312aa3..a6a4ac5ba3d00 100644 --- a/exporter/datasetexporter/logs_exporter.go +++ b/exporter/datasetexporter/logs_exporter.go @@ -239,13 +239,13 @@ func (e *DatasetExporter) consumeLogs(_ context.Context, ld plog.Logs) error { var events []*add_events.EventBundle resourceLogs := ld.ResourceLogs() - for i := 0; i < resourceLogs.Len(); i++ { + for i := range resourceLogs.Len() { resource := resourceLogs.At(i).Resource() scopeLogs := resourceLogs.At(i).ScopeLogs() - for j := 0; j < scopeLogs.Len(); j++ { + for j := range scopeLogs.Len() { scope := scopeLogs.At(j).Scope() logRecords := scopeLogs.At(j).LogRecords() - for k := 0; k < logRecords.Len(); k++ { + for k := range logRecords.Len() { logRecord := logRecords.At(k) events = append( events, diff --git a/exporter/datasetexporter/logs_exporter_stress_test.go b/exporter/datasetexporter/logs_exporter_stress_test.go index 5d0f8ebdacdd3..d36ba0b85febc 100644 --- a/exporter/datasetexporter/logs_exporter_stress_test.go +++ b/exporter/datasetexporter/logs_exporter_stress_test.go @@ -101,11 +101,11 @@ func TestConsumeLogsManyLogsShouldSucceed(t *testing.T) { err = logs.Start(context.Background(), componenttest.NewNopHost()) assert.NoError(t, err) - for bI := 0; bI < maxBatchCount; bI++ { + for bI := range maxBatchCount { batch := plog.NewLogs() rL := batch.ResourceLogs().AppendEmpty() sL := rL.ScopeLogs().AppendEmpty() - for lI := 0; lI < logsPerBatch; lI++ { + for lI := range logsPerBatch { key := fmt.Sprintf("%04d-%06d", bI, lI) log := sL.LogRecords().AppendEmpty() log.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) diff --git a/exporter/datasetexporter/traces_exporter.go b/exporter/datasetexporter/traces_exporter.go index 78674249dd9a4..c0717028caf4f 100644 --- a/exporter/datasetexporter/traces_exporter.go +++ b/exporter/datasetexporter/traces_exporter.go @@ -140,13 +140,13 @@ func buildEventsFromTraces(ld ptrace.Traces, serverHost string, settings TracesS // convert spans 
into events resourceSpans := ld.ResourceSpans() - for i := 0; i < resourceSpans.Len(); i++ { + for i := range resourceSpans.Len() { resource := resourceSpans.At(i).Resource() scopeSpans := resourceSpans.At(i).ScopeSpans() - for j := 0; j < scopeSpans.Len(); j++ { + for j := range scopeSpans.Len() { scope := scopeSpans.At(j).Scope() spanRecords := scopeSpans.At(j).Spans() - for k := 0; k < spanRecords.Len(); k++ { + for k := range spanRecords.Len() { spanRecord := spanRecords.At(k) spans = append(spans, spanBundle{spanRecord, resource, scope}) } diff --git a/exporter/dorisexporter/exporter_logs.go b/exporter/dorisexporter/exporter_logs.go index 405cd99ef78a4..ac076c4b3ee24 100644 --- a/exporter/dorisexporter/exporter_logs.go +++ b/exporter/dorisexporter/exporter_logs.go @@ -83,7 +83,7 @@ func (e *logsExporter) shutdown(_ context.Context) error { func (e *logsExporter) pushLogData(ctx context.Context, ld plog.Logs) error { logs := make([]*dLog, 0, ld.LogRecordCount()) - for i := 0; i < ld.ResourceLogs().Len(); i++ { + for i := range ld.ResourceLogs().Len() { resourceLogs := ld.ResourceLogs().At(i) resource := resourceLogs.Resource() resourceAttributes := resource.Attributes() @@ -93,10 +93,10 @@ func (e *logsExporter) pushLogData(ctx context.Context, ld plog.Logs) error { serviceName = v.AsString() } - for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ { + for j := range resourceLogs.ScopeLogs().Len() { scopeLogs := resourceLogs.ScopeLogs().At(j) - for k := 0; k < scopeLogs.LogRecords().Len(); k++ { + for k := range scopeLogs.LogRecords().Len() { logRecord := scopeLogs.LogRecords().At(k) log := &dLog{ diff --git a/exporter/dorisexporter/exporter_logs_test.go b/exporter/dorisexporter/exporter_logs_test.go index da26ee1de33bb..024cbf25afbe3 100644 --- a/exporter/dorisexporter/exporter_logs_test.go +++ b/exporter/dorisexporter/exporter_logs_test.go @@ -76,7 +76,7 @@ func simpleLogs(count int) plog.Logs { sl.Scope().SetVersion("1.0.0") sl.Scope().Attributes().PutStr("lib", "doris") timestamp := time.Now() - for i := 0; i < count; i++ { + for i := range count { r := sl.LogRecords().AppendEmpty() r.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) r.SetObservedTimestamp(pcommon.NewTimestampFromTime(timestamp)) diff --git a/exporter/dorisexporter/exporter_metrics.go b/exporter/dorisexporter/exporter_metrics.go index c2e0b8597d425..67dd0858e0c5e 100644 --- a/exporter/dorisexporter/exporter_metrics.go +++ b/exporter/dorisexporter/exporter_metrics.go @@ -98,7 +98,7 @@ func (e *metricsExporter) shutdown(_ context.Context) error { func (e *metricsExporter) pushMetricData(ctx context.Context, md pmetric.Metrics) error { metricMap := initMetricMap(md.DataPointCount()) - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { resourceMetric := md.ResourceMetrics().At(i) resource := resourceMetric.Resource() resourceAttributes := resource.Attributes() @@ -108,10 +108,10 @@ func (e *metricsExporter) pushMetricData(ctx context.Context, md pmetric.Metrics serviceName = v.AsString() } - for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ { + for j := range resourceMetric.ScopeMetrics().Len() { scopeMetric := resourceMetric.ScopeMetrics().At(j) - for k := 0; k < scopeMetric.Metrics().Len(); k++ { + for k := range scopeMetric.Metrics().Len() { metric := scopeMetric.Metrics().At(k) dm := &dMetric{ diff --git a/exporter/dorisexporter/exporter_metrics_test.go b/exporter/dorisexporter/exporter_metrics_test.go index f61dfd4f2c422..e4bf43df98da9 100644 --- 
a/exporter/dorisexporter/exporter_metrics_test.go +++ b/exporter/dorisexporter/exporter_metrics_test.go @@ -87,7 +87,7 @@ func simpleMetrics(count int, typeSet map[pmetric.MetricType]struct{}) pmetric.M sm.Scope().SetName("Scope name 1") sm.Scope().SetVersion("Scope version 1") timestamp := time.Now() - for i := 0; i < count; i++ { + for i := range count { // gauge if _, ok := typeSet[pmetric.MetricTypeGauge]; ok { m := sm.Metrics().AppendEmpty() @@ -216,7 +216,7 @@ func simpleMetrics(count int, typeSet map[pmetric.MetricType]struct{}) pmetric.M sm.Scope().SetDroppedAttributesCount(20) sm.Scope().SetName("Scope name 2") sm.Scope().SetVersion("Scope version 2") - for i := 0; i < count; i++ { + for i := range count { // gauge if _, ok := typeSet[pmetric.MetricTypeGauge]; ok { m := sm.Metrics().AppendEmpty() @@ -333,7 +333,7 @@ func simpleMetrics(count int, typeSet map[pmetric.MetricType]struct{}) pmetric.M sm.Scope().SetDroppedAttributesCount(20) sm.Scope().SetName("Scope name 3") sm.Scope().SetVersion("Scope version 3") - for i := 0; i < count; i++ { + for i := range count { // gauge if _, ok := typeSet[pmetric.MetricTypeGauge]; ok { m := sm.Metrics().AppendEmpty() diff --git a/exporter/dorisexporter/exporter_traces.go b/exporter/dorisexporter/exporter_traces.go index a17f516a84e3d..3b6486ff31fa0 100644 --- a/exporter/dorisexporter/exporter_traces.go +++ b/exporter/dorisexporter/exporter_traces.go @@ -105,7 +105,7 @@ func (e *tracesExporter) shutdown(_ context.Context) error { func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) error { traces := make([]*dTrace, 0, td.SpanCount()) - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { resourceSpan := td.ResourceSpans().At(i) resource := resourceSpan.Resource() resourceAttributes := resource.Attributes() @@ -115,15 +115,15 @@ func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) er serviceName = v.AsString() } - for j := 0; j < resourceSpan.ScopeSpans().Len(); j++ { + for j := range resourceSpan.ScopeSpans().Len() { scopeSpan := resourceSpan.ScopeSpans().At(j) - for k := 0; k < scopeSpan.Spans().Len(); k++ { + for k := range scopeSpan.Spans().Len() { span := scopeSpan.Spans().At(k) events := span.Events() newEvents := make([]*dEvent, 0, events.Len()) - for l := 0; l < events.Len(); l++ { + for l := range events.Len() { event := events.At(l) newEvent := &dEvent{ @@ -137,7 +137,7 @@ func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) er links := span.Links() newLinks := make([]*dLink, 0, links.Len()) - for l := 0; l < links.Len(); l++ { + for l := range links.Len() { link := links.At(l) newLink := &dLink{ diff --git a/exporter/dorisexporter/exporter_traces_test.go b/exporter/dorisexporter/exporter_traces_test.go index 0a3886d9b689f..9e9fd52987281 100644 --- a/exporter/dorisexporter/exporter_traces_test.go +++ b/exporter/dorisexporter/exporter_traces_test.go @@ -80,7 +80,7 @@ func simpleTraces(count int) ptrace.Traces { ss.Scope().SetDroppedAttributesCount(20) ss.Scope().Attributes().PutStr("lib", "doris") timestamp := time.Now() - for i := 0; i < count; i++ { + for i := range count { s := ss.Spans().AppendEmpty() s.SetTraceID([16]byte{1, 2, 3, byte(i)}) s.SetSpanID([8]byte{1, 2, 3, byte(i)}) diff --git a/exporter/dorisexporter/metrics_exponential_histogram.go b/exporter/dorisexporter/metrics_exponential_histogram.go index 51fb42742ef9b..bd2e45dcfe8c6 100644 --- a/exporter/dorisexporter/metrics_exponential_histogram.go +++ 
b/exporter/dorisexporter/metrics_exponential_histogram.go @@ -52,12 +52,12 @@ func (m *metricModelExponentialHistogram) add(pm pmetric.Metric, dm *dMetric, e } dataPoints := pm.ExponentialHistogram().DataPoints() - for i := 0; i < dataPoints.Len(); i++ { + for i := range dataPoints.Len() { dp := dataPoints.At(i) exemplars := dp.Exemplars() newExemplars := make([]*dExemplar, 0, exemplars.Len()) - for j := 0; j < exemplars.Len(); j++ { + for j := range exemplars.Len() { exemplar := exemplars.At(j) newExemplar := &dExemplar{ @@ -73,13 +73,13 @@ func (m *metricModelExponentialHistogram) add(pm pmetric.Metric, dm *dMetric, e positiveBucketCounts := dp.Positive().BucketCounts() newPositiveBucketCounts := make([]int64, 0, positiveBucketCounts.Len()) - for j := 0; j < positiveBucketCounts.Len(); j++ { + for j := range positiveBucketCounts.Len() { newPositiveBucketCounts = append(newPositiveBucketCounts, int64(positiveBucketCounts.At(j))) } negativeBucketCounts := dp.Negative().BucketCounts() newNegativeBucketCounts := make([]int64, 0, negativeBucketCounts.Len()) - for j := 0; j < negativeBucketCounts.Len(); j++ { + for j := range negativeBucketCounts.Len() { newNegativeBucketCounts = append(newNegativeBucketCounts, int64(negativeBucketCounts.At(j))) } diff --git a/exporter/dorisexporter/metrics_gauge.go b/exporter/dorisexporter/metrics_gauge.go index 44be7ce3e69df..a8c330e8c1e4b 100644 --- a/exporter/dorisexporter/metrics_gauge.go +++ b/exporter/dorisexporter/metrics_gauge.go @@ -41,12 +41,12 @@ func (m *metricModelGauge) add(pm pmetric.Metric, dm *dMetric, e *metricsExporte } dataPoints := pm.Gauge().DataPoints() - for i := 0; i < dataPoints.Len(); i++ { + for i := range dataPoints.Len() { dp := dataPoints.At(i) exemplars := dp.Exemplars() newExemplars := make([]*dExemplar, 0, exemplars.Len()) - for j := 0; j < exemplars.Len(); j++ { + for j := range exemplars.Len() { exemplar := exemplars.At(j) newExemplar := &dExemplar{ diff --git a/exporter/dorisexporter/metrics_histogram.go b/exporter/dorisexporter/metrics_histogram.go index 43164eefe0f3f..131b65aa95d19 100644 --- a/exporter/dorisexporter/metrics_histogram.go +++ b/exporter/dorisexporter/metrics_histogram.go @@ -47,12 +47,12 @@ func (m *metricModelHistogram) add(pm pmetric.Metric, dm *dMetric, e *metricsExp } dataPoints := pm.Histogram().DataPoints() - for i := 0; i < dataPoints.Len(); i++ { + for i := range dataPoints.Len() { dp := dataPoints.At(i) exemplars := dp.Exemplars() newExemplars := make([]*dExemplar, 0, exemplars.Len()) - for j := 0; j < exemplars.Len(); j++ { + for j := range exemplars.Len() { exemplar := exemplars.At(j) newExemplar := &dExemplar{ @@ -68,13 +68,13 @@ func (m *metricModelHistogram) add(pm pmetric.Metric, dm *dMetric, e *metricsExp bucketCounts := dp.BucketCounts() newBucketCounts := make([]int64, 0, bucketCounts.Len()) - for j := 0; j < bucketCounts.Len(); j++ { + for j := range bucketCounts.Len() { newBucketCounts = append(newBucketCounts, int64(bucketCounts.At(j))) } explicitBounds := dp.ExplicitBounds() newExplicitBounds := make([]float64, 0, explicitBounds.Len()) - for j := 0; j < explicitBounds.Len(); j++ { + for j := range explicitBounds.Len() { newExplicitBounds = append(newExplicitBounds, explicitBounds.At(j)) } diff --git a/exporter/dorisexporter/metrics_sum.go b/exporter/dorisexporter/metrics_sum.go index 57cf1a3d7e8ba..994f8e4f8fbac 100644 --- a/exporter/dorisexporter/metrics_sum.go +++ b/exporter/dorisexporter/metrics_sum.go @@ -43,12 +43,12 @@ func (m *metricModelSum) add(pm pmetric.Metric, dm 
*dMetric, e *metricsExporter) } dataPoints := pm.Sum().DataPoints() - for i := 0; i < dataPoints.Len(); i++ { + for i := range dataPoints.Len() { dp := dataPoints.At(i) exemplars := dp.Exemplars() newExemplars := make([]*dExemplar, 0, exemplars.Len()) - for j := 0; j < exemplars.Len(); j++ { + for j := range exemplars.Len() { exemplar := exemplars.At(j) newExemplar := &dExemplar{ diff --git a/exporter/dorisexporter/metrics_summary.go b/exporter/dorisexporter/metrics_summary.go index 3c14501f30b51..b4712a9fca2bd 100644 --- a/exporter/dorisexporter/metrics_summary.go +++ b/exporter/dorisexporter/metrics_summary.go @@ -48,12 +48,12 @@ func (m *metricModelSummary) add(pm pmetric.Metric, dm *dMetric, e *metricsExpor } dataPoints := pm.Summary().DataPoints() - for i := 0; i < dataPoints.Len(); i++ { + for i := range dataPoints.Len() { dp := dataPoints.At(i) quantileValues := dp.QuantileValues() newQuantileValues := make([]*dQuantileValue, 0, quantileValues.Len()) - for j := 0; j < quantileValues.Len(); j++ { + for j := range quantileValues.Len() { quantileValue := quantileValues.At(j) newQuantileValue := &dQuantileValue{ diff --git a/exporter/elasticsearchexporter/bulkindexer.go b/exporter/elasticsearchexporter/bulkindexer.go index 75dd8adec25af..988a6e57c0e2f 100644 --- a/exporter/elasticsearchexporter/bulkindexer.go +++ b/exporter/elasticsearchexporter/bulkindexer.go @@ -194,7 +194,7 @@ func newAsyncBulkIndexer(logger *zap.Logger, client esapi.Transport, config *Con } pool.wg.Add(numWorkers) - for i := 0; i < numWorkers; i++ { + for range numWorkers { bi, err := docappender.NewBulkIndexer(bulkIndexerConfig(client, config, requireDataStream)) if err != nil { return nil, err diff --git a/exporter/elasticsearchexporter/exporter.go b/exporter/elasticsearchexporter/exporter.go index 12258179f571f..f3d56576c12ff 100644 --- a/exporter/elasticsearchexporter/exporter.go +++ b/exporter/elasticsearchexporter/exporter.go @@ -171,15 +171,15 @@ func (e *elasticsearchExporter) pushLogsData(ctx context.Context, ld plog.Logs) var errs []error rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) resource := rl.Resource() ills := rl.ScopeLogs() - for j := 0; j < ills.Len(); j++ { + for j := range ills.Len() { ill := ills.At(j) scope := ill.Scope() logs := ill.LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { if err := e.pushLogRecord(ctx, router, resource, rl.SchemaUrl(), logs.At(k), scope, ill.SchemaUrl(), session); err != nil { if cerr := ctx.Err(); cerr != nil { return cerr @@ -264,15 +264,15 @@ func (e *elasticsearchExporter) pushMetricsData( var validationErrs []error // log instead of returning these so that upstream does not retry var errs []error resourceMetrics := metrics.ResourceMetrics() - for i := 0; i < resourceMetrics.Len(); i++ { + for i := range resourceMetrics.Len() { resourceMetric := resourceMetrics.At(i) resource := resourceMetric.Resource() scopeMetrics := resourceMetric.ScopeMetrics() - for j := 0; j < scopeMetrics.Len(); j++ { + for j := range scopeMetrics.Len() { scopeMetrics := scopeMetrics.At(j) scope := scopeMetrics.Scope() - for k := 0; k < scopeMetrics.Metrics().Len(); k++ { + for k := range scopeMetrics.Metrics().Len() { metric := scopeMetrics.Metrics().At(k) upsertDataPoint := func(dp datapoints.DataPoint) error { @@ -302,7 +302,7 @@ func (e *elasticsearchExporter) pushMetricsData( switch metric.Type() { case pmetric.MetricTypeSum: dps := metric.Sum().DataPoints() - for l := 0; l < dps.Len(); l++ { + 
for l := range dps.Len() { dp := dps.At(l) if err := upsertDataPoint(datapoints.NewNumber(metric, dp)); err != nil { validationErrs = append(validationErrs, err) @@ -311,7 +311,7 @@ func (e *elasticsearchExporter) pushMetricsData( } case pmetric.MetricTypeGauge: dps := metric.Gauge().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { dp := dps.At(l) if err := upsertDataPoint(datapoints.NewNumber(metric, dp)); err != nil { validationErrs = append(validationErrs, err) @@ -324,7 +324,7 @@ func (e *elasticsearchExporter) pushMetricsData( continue } dps := metric.ExponentialHistogram().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { dp := dps.At(l) if err := upsertDataPoint(datapoints.NewExponentialHistogram(metric, dp)); err != nil { validationErrs = append(validationErrs, err) @@ -337,7 +337,7 @@ func (e *elasticsearchExporter) pushMetricsData( continue } dps := metric.Histogram().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { dp := dps.At(l) if err := upsertDataPoint(datapoints.NewHistogram(metric, dp)); err != nil { validationErrs = append(validationErrs, err) @@ -346,7 +346,7 @@ func (e *elasticsearchExporter) pushMetricsData( } case pmetric.MetricTypeSummary: dps := metric.Summary().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { dp := dps.At(l) if err := upsertDataPoint(datapoints.NewSummary(metric, dp)); err != nil { validationErrs = append(validationErrs, err) @@ -411,15 +411,15 @@ func (e *elasticsearchExporter) pushTraceData( var errs []error resourceSpans := td.ResourceSpans() - for i := 0; i < resourceSpans.Len(); i++ { + for i := range resourceSpans.Len() { il := resourceSpans.At(i) resource := il.Resource() scopeSpans := il.ScopeSpans() - for j := 0; j < scopeSpans.Len(); j++ { + for j := range scopeSpans.Len() { scopeSpan := scopeSpans.At(j) scope := scopeSpan.Scope() spans := scopeSpan.Spans() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { span := spans.At(k) if err := e.pushTraceRecord(ctx, router, resource, il.SchemaUrl(), span, scope, scopeSpan.SchemaUrl(), session); err != nil { if cerr := ctx.Err(); cerr != nil { @@ -427,7 +427,7 @@ func (e *elasticsearchExporter) pushTraceData( } errs = append(errs, err) } - for ii := 0; ii < span.Events().Len(); ii++ { + for ii := range span.Events().Len() { spanEvent := span.Events().At(ii) if err := e.pushSpanEvent(ctx, router, resource, il.SchemaUrl(), span, spanEvent, scope, scopeSpan.SchemaUrl(), session); err != nil { errs = append(errs, err) @@ -540,15 +540,15 @@ func (e *elasticsearchExporter) pushProfilesData(ctx context.Context, pd pprofil var errs []error rps := pd.ResourceProfiles() - for i := 0; i < rps.Len(); i++ { + for i := range rps.Len() { rp := rps.At(i) resource := rp.Resource() sps := rp.ScopeProfiles() - for j := 0; j < sps.Len(); j++ { + for j := range sps.Len() { sp := sps.At(j) scope := sp.Scope() p := sp.Profiles() - for k := 0; k < p.Len(); k++ { + for k := range p.Len() { if err := e.pushProfileRecord(ctx, resource, p.At(k), scope, defaultSession, eventsSession, stackTracesSession, stackFramesSession, executablesSession); err != nil { if cerr := ctx.Err(); cerr != nil { return cerr diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go index f19b0d3e3e948..2e6b6ec7769d8 100644 --- a/exporter/elasticsearchexporter/exporter_test.go +++ b/exporter/elasticsearchexporter/exporter_test.go @@ -119,7 +119,7 @@ func 
TestExporterLogs(t *testing.T) { body := pcommon.NewValueMap() m := body.Map() s := m.PutEmptySlice("a") - for i := 0; i < 2; i++ { + for i := range 2 { s.AppendEmpty().SetInt(int64(i)) } return body @@ -672,7 +672,7 @@ func TestExporterLogs(t *testing.T) { cfg.Retry.InitialInterval = 1 * time.Millisecond cfg.Retry.MaxInterval = 10 * time.Millisecond }) - for i := 0; i < 3; i++ { + for i := range 3 { logRecord := plog.NewLogRecord() logRecord.Attributes().PutInt("idx", int64(i)) mustSendLogRecords(t, exporter, logRecord) diff --git a/exporter/elasticsearchexporter/integrationtest/exporter_bench_test.go b/exporter/elasticsearchexporter/integrationtest/exporter_bench_test.go index cc78984acb3df..59faae94eaf29 100644 --- a/exporter/elasticsearchexporter/integrationtest/exporter_bench_test.go +++ b/exporter/elasticsearchexporter/integrationtest/exporter_bench_test.go @@ -70,7 +70,7 @@ func benchmarkLogs(b *testing.B, batchSize int, mappingMode string) { b.ReportAllocs() b.ResetTimer() b.StopTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StartTimer() require.NoError(b, exporter.ConsumeLogs(ctx, logs)) b.StopTimer() @@ -100,7 +100,7 @@ func benchmarkMetrics(b *testing.B, batchSize int, mappingMode string) { b.ReportAllocs() b.ResetTimer() b.StopTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StartTimer() require.NoError(b, exporter.ConsumeMetrics(ctx, metrics)) b.StopTimer() @@ -130,7 +130,7 @@ func benchmarkTraces(b *testing.B, batchSize int, mappingMode string) { b.ReportAllocs() b.ResetTimer() b.StopTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StartTimer() require.NoError(b, exporter.ConsumeTraces(ctx, traces)) b.StopTimer() diff --git a/exporter/elasticsearchexporter/internal/datapoints/histogram.go b/exporter/elasticsearchexporter/internal/datapoints/histogram.go index 1f681e25a7709..cf9f9e7ddc98a 100644 --- a/exporter/elasticsearchexporter/internal/datapoints/histogram.go +++ b/exporter/elasticsearchexporter/internal/datapoints/histogram.go @@ -68,7 +68,7 @@ func histogramToValue(dp pmetric.HistogramDataPoint) (pcommon.Value, error) { values.EnsureCapacity(bucketCounts.Len()) counts.EnsureCapacity(bucketCounts.Len()) - for i := 0; i < bucketCounts.Len(); i++ { + for i := range bucketCounts.Len() { count := bucketCounts.At(i) if count == 0 { continue diff --git a/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go b/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go index f1912421fc519..ea4ce963a5a31 100644 --- a/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go +++ b/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go @@ -52,7 +52,7 @@ func ToTDigest(dp pmetric.ExponentialHistogramDataPoint) (counts []int64, values offset = int(dp.Positive().Offset()) bucketCounts = dp.Positive().BucketCounts() - for i := 0; i < bucketCounts.Len(); i++ { + for i := range bucketCounts.Len() { count := bucketCounts.At(i) if count == 0 { continue diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go index 39b0294d48f5d..be7868e42d755 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go @@ -196,7 +196,7 @@ func (doc *Document) AddAttribute(key string, attribute pcommon.Value) { // AddEvents converts and adds span events to the document. 
 func (doc *Document) AddEvents(key string, events ptrace.SpanEventSlice) {
-	for i := 0; i < events.Len(); i++ {
+	for i := range events.Len() {
 		e := events.At(i)
 		doc.AddTimestamp(flattenKey(key, e.Name()+".time"), e.Timestamp())
 		doc.AddAttributes(flattenKey(key, e.Name()), e.Attributes())
@@ -237,7 +237,7 @@ func (doc *Document) Dedup() {
 	//
 	// This step removes potential conflicts when dedotting and serializing fields.
 	var renamed bool
-	for i := 0; i < len(doc.fields)-1; i++ {
+	for i := range len(doc.fields) - 1 {
 		key, nextKey := doc.fields[i].key, doc.fields[i+1].key
 		if len(key) < len(nextKey) && strings.HasPrefix(nextKey, key) && nextKey[len(key)] == '.' {
 			renamed = true
@@ -252,7 +252,7 @@ func (doc *Document) Dedup() {
 	//
 	// This step ensures that we do not have duplicate fields names when serializing.
 	// Elasticsearch JSON parser will fail otherwise.
-	for i := 0; i < len(doc.fields)-1; i++ {
+	for i := range len(doc.fields) - 1 {
 		if doc.fields[i].key == doc.fields[i+1].key {
 			doc.fields[i].value = ignoreValue
 		}
@@ -555,7 +555,7 @@ func arrFromAttributes(aa pcommon.Slice) []Value {
 	}
 
 	values := make([]Value, aa.Len())
-	for i := 0; i < aa.Len(); i++ {
+	for i := range aa.Len() {
 		values[i] = ValueFromAttribute(aa.At(i))
 	}
 	return values
@@ -597,7 +597,7 @@ func commonObjPrefix(a, b string) int {
 		end = alt
 	}
 
-	for i := 0; i < end; i++ {
+	for i := range end {
 		if a[i] != b[i] {
 			return i
 		}
diff --git a/exporter/elasticsearchexporter/internal/serializer/map.go b/exporter/elasticsearchexporter/internal/serializer/map.go
index 4ef5333f3b661..1c8fe4576d5e5 100644
--- a/exporter/elasticsearchexporter/internal/serializer/map.go
+++ b/exporter/elasticsearchexporter/internal/serializer/map.go
@@ -53,7 +53,7 @@ func WriteValue(v *json.Visitor, val pcommon.Value, stringifyMaps bool) {
 	case pcommon.ValueTypeSlice:
 		_ = v.OnArrayStart(-1, structform.AnyType)
 		slice := val.Slice()
-		for i := 0; i < slice.Len(); i++ {
+		for i := range slice.Len() {
 			WriteValue(v, slice.At(i), stringifyMaps)
 		}
 		_ = v.OnArrayFinished()
diff --git a/exporter/elasticsearchexporter/internal/serializer/otelserializer/logs.go b/exporter/elasticsearchexporter/internal/serializer/otelserializer/logs.go
index b88c38cfd279f..f3ff54589e974 100644
--- a/exporter/elasticsearchexporter/internal/serializer/otelserializer/logs.go
+++ b/exporter/elasticsearchexporter/internal/serializer/otelserializer/logs.go
@@ -62,7 +62,7 @@ func writeLogBody(v *json.Visitor, record plog.LogRecord) {
 	// otherwise, wrap the array in an object
 	s := body.Slice()
 	allMaps := true
-	for i := 0; i < s.Len(); i++ {
+	for i := range s.Len() {
 		if s.At(i).Type() != pcommon.ValueTypeMap {
 			allMaps = false
 		}
diff --git a/exporter/elasticsearchexporter/internal/serializer/otelserializer/serializeprofiles/benchmark_test.go b/exporter/elasticsearchexporter/internal/serializer/otelserializer/serializeprofiles/benchmark_test.go
index 506c1cca57828..32bd91d7eafe0 100644
--- a/exporter/elasticsearchexporter/internal/serializer/otelserializer/serializeprofiles/benchmark_test.go
+++ b/exporter/elasticsearchexporter/internal/serializer/otelserializer/serializeprofiles/benchmark_test.go
@@ -74,7 +74,7 @@ func BenchmarkTransform(b *testing.B) {
 			b.ReportAllocs()
 			b.ResetTimer()
 
-			for i := 0; i < b.N; i++ {
+			for range b.N {
 				_, _ = Transform(rp.Resource(), sp.Scope(), p)
 			}
 		})
diff --git a/exporter/elasticsearchexporter/internal/serializer/otelserializer/serializeprofiles/transform.go b/exporter/elasticsearchexporter/internal/serializer/otelserializer/serializeprofiles/transform.go
index e1e31d5718aa5..99abbc4bebd0a 100644 --- a/exporter/elasticsearchexporter/internal/serializer/otelserializer/serializeprofiles/transform.go +++ b/exporter/elasticsearchexporter/internal/serializer/otelserializer/serializeprofiles/transform.go @@ -82,7 +82,7 @@ func stackPayloads(resource pcommon.Resource, scope pcommon.InstrumentationScope hostMetadata := newHostMetadata(resource, scope, profile) - for i := 0; i < profile.Sample().Len(); i++ { + for i := range profile.Sample().Len() { sample := profile.Sample().At(i) frames, frameTypes, leafFrame, err := stackFrames(profile, sample) @@ -112,7 +112,7 @@ func stackPayloads(resource pcommon.Resource, scope pcommon.InstrumentationScope } // Add one event per timestamp and its count value. - for j := 0; j < sample.TimestampsUnixNano().Len(); j++ { + for j := range sample.TimestampsUnixNano().Len() { t := sample.TimestampsUnixNano().At(j) event.TimeStamp = newUnixTime64(t) @@ -168,7 +168,7 @@ func stackTraceEvent(traceID string, profile pprofile.Profile, sample pprofile.S } // Store event-specific attributes. - for i := 0; i < sample.AttributeIndices().Len(); i++ { + for i := range sample.AttributeIndices().Len() { if profile.AttributeTable().Len() < i { continue } @@ -240,7 +240,7 @@ func stackFrames(profile pprofile.Profile, sample pprofile.Sample) ([]StackFrame fileNames := make([]string, 0, location.Line().Len()) lineNumbers := make([]int32, 0, location.Line().Len()) - for i := 0; i < location.Line().Len(); i++ { + for i := range location.Line().Len() { line := location.Line().At(i) if line.FunctionIndex() < int32(profile.FunctionTable().Len()) { @@ -301,7 +301,7 @@ type attributable interface { func getStringFromAttribute(profile pprofile.Profile, record attributable, attrKey string) (string, error) { lenAttrTable := profile.AttributeTable().Len() - for i := 0; i < record.AttributeIndices().Len(); i++ { + for i := range record.AttributeIndices().Len() { idx := int(record.AttributeIndices().At(i)) if idx >= lenAttrTable { @@ -331,7 +331,7 @@ func executables(profile pprofile.Profile, mappings pprofile.MappingSlice) ([]Ex metadata := make([]ExeMetadata, 0, mappings.Len()) lastSeen := GetStartOfWeekFromTime(time.Now()) - for i := 0; i < mappings.Len(); i++ { + for i := range mappings.Len() { mapping := mappings.At(i) filename := profile.StringTable().At(int(mapping.FilenameStrindex())) diff --git a/exporter/elasticsearchexporter/internal/serializer/otelserializer/traces.go b/exporter/elasticsearchexporter/internal/serializer/otelserializer/traces.go index 4f9619a719dd7..2dda36ed4b698 100644 --- a/exporter/elasticsearchexporter/internal/serializer/otelserializer/traces.go +++ b/exporter/elasticsearchexporter/internal/serializer/otelserializer/traces.go @@ -80,7 +80,7 @@ func writeSpanLinks(v *json.Visitor, span ptrace.Span) { _ = v.OnKey("links") _ = v.OnArrayStart(-1, structform.AnyType) spanLinks := span.Links() - for i := 0; i < spanLinks.Len(); i++ { + for i := range spanLinks.Len() { spanLink := spanLinks.At(i) _ = v.OnObjectStart(-1, structform.AnyType) writeStringFieldSkipDefault(v, "trace_id", spanLink.TraceID().String()) diff --git a/exporter/elasticsearchexporter/metric_grouping.go b/exporter/elasticsearchexporter/metric_grouping.go index f847e9f31ef5d..310646db8fe6c 100644 --- a/exporter/elasticsearchexporter/metric_grouping.go +++ b/exporter/elasticsearchexporter/metric_grouping.go @@ -131,7 +131,7 @@ func valueHash(h hash.Hash, v pcommon.Value) { } func sliceHash(h hash.Hash, s pcommon.Slice) { - for i := 0; i < s.Len(); i++ { 
+ for i := range s.Len() { valueHash(h, s.At(i)) } } diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index 167a45d280d29..b61132a4a2879 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -289,7 +289,7 @@ func (m *encodeModel) encodeEvents(document *objmodel.Document, events ptrace.Sp func spanLinksToString(spanLinkSlice ptrace.SpanLinkSlice) string { linkArray := make([]map[string]any, 0, spanLinkSlice.Len()) - for i := 0; i < spanLinkSlice.Len(); i++ { + for i := range spanLinkSlice.Len() { spanLink := spanLinkSlice.At(i) link := map[string]any{} link[spanIDField] = traceutil.SpanIDToHexOrEmptyString(spanLink.SpanID()) diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go index 37ccb26238398..130d174aae794 100644 --- a/exporter/elasticsearchexporter/model_test.go +++ b/exporter/elasticsearchexporter/model_test.go @@ -99,7 +99,7 @@ func TestEncodeMetric(t *testing.T) { sm := rm.ScopeMetrics().At(0) m := sm.Metrics().At(0) dps := m.Sum().DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := datapoints.NewNumber(m, dps.At(i)) dpHash := hasher.hashDataPoint(rm.Resource(), sm.Scope(), dp) dataPoints, ok := groupedDataPoints[dpHash] @@ -253,7 +253,7 @@ func TestEncodeEvents(t *testing.T) { events := ptrace.NewSpanEventSlice() events.EnsureCapacity(4) - for i := 0; i < 4; i++ { + for i := range 4 { event := events.AppendEmpty() event.SetTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(time.Duration(i) * time.Minute))) event.SetName(fmt.Sprintf("event_%d", i)) diff --git a/exporter/fileexporter/buffered_writer_test.go b/exporter/fileexporter/buffered_writer_test.go index d97832bb6eb1b..ebedaa09921ab 100644 --- a/exporter/fileexporter/buffered_writer_test.go +++ b/exporter/fileexporter/buffered_writer_test.go @@ -63,7 +63,7 @@ func BenchmarkWriter(b *testing.B) { 10 * SizeMegaByte, } { payload := make([]byte, payloadSize) - for i := 0; i < payloadSize; i++ { + for i := range payloadSize { payload[i] = 'a' } for name, w := range map[string]io.WriteCloser{ @@ -77,7 +77,7 @@ func BenchmarkWriter(b *testing.B) { b.ResetTimer() var err error - for i := 0; i < b.N; i++ { + for range b.N { _, err = w.Write(payload) } errBenchmark = errors.Join(err, w.Close()) diff --git a/exporter/fileexporter/grouping_file_exporter.go b/exporter/fileexporter/grouping_file_exporter.go index 3276f027c1103..3acc112a2608e 100644 --- a/exporter/fileexporter/grouping_file_exporter.go +++ b/exporter/fileexporter/grouping_file_exporter.go @@ -44,7 +44,7 @@ func (e *groupingFileExporter) consumeTraces(ctx context.Context, td ptrace.Trac groups := make(map[string][]ptrace.ResourceSpans) - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { rSpans := td.ResourceSpans().At(i) group(e, groups, rSpans.Resource(), rSpans) } @@ -82,7 +82,7 @@ func (e *groupingFileExporter) consumeMetrics(ctx context.Context, md pmetric.Me groups := make(map[string][]pmetric.ResourceMetrics) - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { rMetrics := md.ResourceMetrics().At(i) group(e, groups, rMetrics.Resource(), rMetrics) } @@ -120,7 +120,7 @@ func (e *groupingFileExporter) consumeLogs(ctx context.Context, ld plog.Logs) er groups := make(map[string][]plog.ResourceLogs) - for i := 0; i < ld.ResourceLogs().Len(); i++ { + for i := range ld.ResourceLogs().Len() { rLogs := 
ld.ResourceLogs().At(i) group(e, groups, rLogs.Resource(), rLogs) } @@ -158,7 +158,7 @@ func (e *groupingFileExporter) consumeProfiles(ctx context.Context, pd pprofile. groups := make(map[string][]pprofile.ResourceProfiles) - for i := 0; i < pd.ResourceProfiles().Len(); i++ { + for i := range pd.ResourceProfiles().Len() { rProfiles := pd.ResourceProfiles().At(i) group(e, groups, rProfiles.Resource(), rProfiles) } diff --git a/exporter/fileexporter/grouping_file_exporter_test.go b/exporter/fileexporter/grouping_file_exporter_test.go index a5b479303af5c..04162329757a8 100644 --- a/exporter/fileexporter/grouping_file_exporter_test.go +++ b/exporter/fileexporter/grouping_file_exporter_test.go @@ -207,7 +207,7 @@ func TestGroupingFileTracesExporter(t *testing.T) { assert.NoError(t, err) gotResourceSpans := make([]ptrace.ResourceSpans, 0) - for i := 0; i < got.ResourceSpans().Len(); i++ { + for i := range got.ResourceSpans().Len() { gotResourceSpans = append(gotResourceSpans, got.ResourceSpans().At(i)) } @@ -284,7 +284,7 @@ func TestGroupingFileLogsExporter(t *testing.T) { assert.NoError(t, err) gotResourceLogs := make([]plog.ResourceLogs, 0) - for i := 0; i < got.ResourceLogs().Len(); i++ { + for i := range got.ResourceLogs().Len() { gotResourceLogs = append(gotResourceLogs, got.ResourceLogs().At(i)) } @@ -362,7 +362,7 @@ func TestGroupingFileMetricsExporter(t *testing.T) { assert.NoError(t, err) gotResourceMetrics := make([]pmetric.ResourceMetrics, 0) - for i := 0; i < got.ResourceMetrics().Len(); i++ { + for i := range got.ResourceMetrics().Len() { gotResourceMetrics = append(gotResourceMetrics, got.ResourceMetrics().At(i)) } @@ -463,7 +463,7 @@ func BenchmarkExporters(b *testing.B) { var traces []ptrace.Traces var logs []plog.Logs - for i := 0; i < 100; i++ { + for i := range 100 { td := testdata.GenerateTracesTwoSpansSameResource() td.ResourceSpans().At(0).Resource().Attributes().PutStr("fileexporter.path_segment", fmt.Sprintf("file%d", i)) traces = append(traces, td) @@ -499,7 +499,7 @@ func BenchmarkExporters(b *testing.B) { b.ResetTimer() ctx := context.Background() - for i := 0; i < b.N; i++ { + for i := range b.N { require.NoError(b, fe.consumeTraces(ctx, traces[i%len(traces)])) require.NoError(b, fe.consumeLogs(ctx, logs[i%len(logs)])) } diff --git a/exporter/googlecloudpubsubexporter/watermark.go b/exporter/googlecloudpubsubexporter/watermark.go index 4eb4672e5860d..9fe6ac18936aa 100644 --- a/exporter/googlecloudpubsubexporter/watermark.go +++ b/exporter/googlecloudpubsubexporter/watermark.go @@ -63,44 +63,44 @@ func earliestMetricsWatermark(metrics pmetric.Metrics, processingTime time.Time, // traverse the metric data, with a collectFunc func traverseMetrics(metrics pmetric.Metrics, collect collectFunc) { - for rix := 0; rix < metrics.ResourceMetrics().Len(); rix++ { + for rix := range metrics.ResourceMetrics().Len() { r := metrics.ResourceMetrics().At(rix) - for lix := 0; lix < r.ScopeMetrics().Len(); lix++ { + for lix := range r.ScopeMetrics().Len() { l := r.ScopeMetrics().At(lix) - for dix := 0; dix < l.Metrics().Len(); dix++ { + for dix := range l.Metrics().Len() { d := l.Metrics().At(dix) //exhaustive:enforce switch d.Type() { case pmetric.MetricTypeHistogram: - for pix := 0; pix < d.Histogram().DataPoints().Len(); pix++ { + for pix := range d.Histogram().DataPoints().Len() { p := d.Histogram().DataPoints().At(pix) if collect(p.Timestamp()) { return } } case pmetric.MetricTypeExponentialHistogram: - for pix := 0; pix < d.ExponentialHistogram().DataPoints().Len(); pix++ { + for 
pix := range d.ExponentialHistogram().DataPoints().Len() { p := d.ExponentialHistogram().DataPoints().At(pix) if collect(p.Timestamp()) { return } } case pmetric.MetricTypeSum: - for pix := 0; pix < d.Sum().DataPoints().Len(); pix++ { + for pix := range d.Sum().DataPoints().Len() { p := d.Sum().DataPoints().At(pix) if collect(p.Timestamp()) { return } } case pmetric.MetricTypeGauge: - for pix := 0; pix < d.Gauge().DataPoints().Len(); pix++ { + for pix := range d.Gauge().DataPoints().Len() { p := d.Gauge().DataPoints().At(pix) if collect(p.Timestamp()) { return } } case pmetric.MetricTypeSummary: - for pix := 0; pix < d.Summary().DataPoints().Len(); pix++ { + for pix := range d.Summary().DataPoints().Len() { p := d.Summary().DataPoints().At(pix) if collect(p.Timestamp()) { return @@ -130,11 +130,11 @@ func earliestLogsWatermark(logs plog.Logs, processingTime time.Time, allowedDrif // traverse the log data, with a collectFunc func traverseLogs(logs plog.Logs, collect collectFunc) { - for rix := 0; rix < logs.ResourceLogs().Len(); rix++ { + for rix := range logs.ResourceLogs().Len() { r := logs.ResourceLogs().At(rix) - for lix := 0; lix < r.ScopeLogs().Len(); lix++ { + for lix := range r.ScopeLogs().Len() { l := r.ScopeLogs().At(lix) - for dix := 0; dix < l.LogRecords().Len(); dix++ { + for dix := range l.LogRecords().Len() { d := l.LogRecords().At(dix) if collect(d.Timestamp()) { return @@ -162,11 +162,11 @@ func earliestTracesWatermark(traces ptrace.Traces, processingTime time.Time, all // traverse the trace data, with a collectFunc func traverseTraces(traces ptrace.Traces, collect collectFunc) { - for rix := 0; rix < traces.ResourceSpans().Len(); rix++ { + for rix := range traces.ResourceSpans().Len() { r := traces.ResourceSpans().At(rix) - for lix := 0; lix < r.ScopeSpans().Len(); lix++ { + for lix := range r.ScopeSpans().Len() { l := r.ScopeSpans().At(lix) - for dix := 0; dix < l.Spans().Len(); dix++ { + for dix := range l.Spans().Len() { d := l.Spans().At(dix) if collect(d.StartTimestamp()) { return diff --git a/exporter/kafkaexporter/kafka_exporter.go b/exporter/kafkaexporter/kafka_exporter.go index 6720237fabe27..1f7ee0b22f2b1 100644 --- a/exporter/kafkaexporter/kafka_exporter.go +++ b/exporter/kafkaexporter/kafka_exporter.go @@ -286,7 +286,7 @@ type resource interface { func getTopic[T resource](ctx context.Context, cfg *Config, resources resourceSlice[T]) string { if cfg.TopicFromAttribute != "" { - for i := 0; i < resources.Len(); i++ { + for i := range resources.Len() { rv, ok := resources.At(i).Resource().Attributes().Get(cfg.TopicFromAttribute) if ok && rv.Str() != "" { return rv.Str() diff --git a/exporter/kafkaexporter/marshaler_test.go b/exporter/kafkaexporter/marshaler_test.go index 95be677c7defa..730f7039aeb22 100644 --- a/exporter/kafkaexporter/marshaler_test.go +++ b/exporter/kafkaexporter/marshaler_test.go @@ -128,7 +128,7 @@ func TestOTLPMetricsJsonMarshaling(t *testing.T) { require.NoError(t, err, "Must have marshaled the data without error") require.Len(t, msgs, len(tt.messagePartitionKeys), "Number of messages must be %d, but was %d", len(tt.messagePartitionKeys), len(msgs)) - for i := 0; i < len(tt.messagePartitionKeys); i++ { + for i := range len(tt.messagePartitionKeys) { require.Equal(t, tt.messagePartitionKeys[i], msgs[i].Key, "message %d has incorrect key", i) } }) @@ -189,7 +189,7 @@ func TestOTLPLogsJsonMarshaling(t *testing.T) { require.NoError(t, err, "Must have marshaled the data without error") require.Len(t, msgs, len(tt.messagePartitionKeys), "Number of 
messages must be %d, but was %d", len(tt.messagePartitionKeys), len(msgs)) - for i := 0; i < len(tt.messagePartitionKeys); i++ { + for i := range len(tt.messagePartitionKeys) { require.Equal(t, tt.messagePartitionKeys[i], msgs[i].Key, "message %d has incorrect key", i) } }) diff --git a/exporter/kafkaexporter/pdata_marshaler.go b/exporter/kafkaexporter/pdata_marshaler.go index ae9726f2cbe88..834fb15c8d33c 100644 --- a/exporter/kafkaexporter/pdata_marshaler.go +++ b/exporter/kafkaexporter/pdata_marshaler.go @@ -25,7 +25,7 @@ func (p pdataLogsMarshaler) Marshal(ld plog.Logs, topic string) ([]*sarama.Produ if p.partitionedByResources { logs := ld.ResourceLogs() - for i := 0; i < logs.Len(); i++ { + for i := range logs.Len() { resourceMetrics := logs.At(i) hash := pdatautil.MapHash(resourceMetrics.Resource().Attributes()) @@ -78,7 +78,7 @@ func (p pdataMetricsMarshaler) Marshal(ld pmetric.Metrics, topic string) ([]*sar if p.partitionedByResources { metrics := ld.ResourceMetrics() - for i := 0; i < metrics.Len(); i++ { + for i := range metrics.Len() { resourceMetrics := metrics.At(i) hash := pdatautil.MapHash(resourceMetrics.Resource().Attributes()) diff --git a/exporter/kafkaexporter/raw_marshaler.go b/exporter/kafkaexporter/raw_marshaler.go index 5a9e283436e81..b0cf5b9386dda 100644 --- a/exporter/kafkaexporter/raw_marshaler.go +++ b/exporter/kafkaexporter/raw_marshaler.go @@ -22,11 +22,11 @@ func newRawMarshaler() rawMarshaler { func (r rawMarshaler) Marshal(logs plog.Logs, topic string) ([]*sarama.ProducerMessage, error) { var messages []*sarama.ProducerMessage - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rl := logs.ResourceLogs().At(i) - for j := 0; j < rl.ScopeLogs().Len(); j++ { + for j := range rl.ScopeLogs().Len() { sl := rl.ScopeLogs().At(j) - for k := 0; k < sl.LogRecords().Len(); k++ { + for k := range sl.LogRecords().Len() { lr := sl.LogRecords().At(k) b, err := r.logBodyAsBytes(lr.Body()) if err != nil { diff --git a/exporter/kineticaexporter/common.go b/exporter/kineticaexporter/common.go index 6eb68b73463ce..dc4a34db5c849 100644 --- a/exporter/kineticaexporter/common.go +++ b/exporter/kineticaexporter/common.go @@ -663,7 +663,7 @@ func otlpKeyValueListToMap(kvList pcommon.Map) map[string]any { // @return []interface{} func otlpArrayToSlice(arr pcommon.Slice) []any { s := make([]any, 0, arr.Len()) - for i := 0; i < arr.Len(); i++ { + for i := range arr.Len() { v := arr.At(i) switch v.Type() { case pcommon.ValueTypeStr: diff --git a/exporter/kineticaexporter/exporter_metric_test.go b/exporter/kineticaexporter/exporter_metric_test.go index 7f048c321056e..4bd328cde2683 100644 --- a/exporter/kineticaexporter/exporter_metric_test.go +++ b/exporter/kineticaexporter/exporter_metric_test.go @@ -46,7 +46,7 @@ func Benchmark_pushMetricsData(b *testing.B) { exporter := newTestMetricsExporter(&testing.T{}) b.ReportAllocs() b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { err := exporter.pushMetricsData(context.TODO(), pm) require.NoError(b, err) } @@ -66,7 +66,7 @@ func simpleMetrics(count int) pmetric.Metrics { sm.Scope().SetDroppedAttributesCount(10) sm.Scope().SetName("Scope name 1") sm.Scope().SetVersion("Scope version 1") - for i := 0; i < count; i++ { + for i := range count { // gauge m := sm.Metrics().AppendEmpty() m.SetName("gauge metrics") @@ -179,7 +179,7 @@ func simpleMetrics(count int) pmetric.Metrics { sm.Scope().SetDroppedAttributesCount(20) sm.Scope().SetName("Scope name 2") sm.Scope().SetVersion("Scope version 2") 
- for i := 0; i < count; i++ { + for i := range count { // gauge m := sm.Metrics().AppendEmpty() m.SetName("gauge metrics") @@ -286,7 +286,7 @@ func simpleMetrics(count int) pmetric.Metrics { sm.Scope().SetDroppedAttributesCount(20) sm.Scope().SetName("Scope name 3") sm.Scope().SetVersion("Scope version 3") - for i := 0; i < count; i++ { + for i := range count { // gauge m := sm.Metrics().AppendEmpty() m.SetName("gauge metrics") diff --git a/exporter/kineticaexporter/metrics_exporter.go b/exporter/kineticaexporter/metrics_exporter.go index 17dd7cc98b2da..ebd8bbc0eea28 100644 --- a/exporter/kineticaexporter/metrics_exporter.go +++ b/exporter/kineticaexporter/metrics_exporter.go @@ -260,20 +260,20 @@ func (e *kineticaMetricsExporter) pushMetricsData(_ context.Context, md pmetric. e.logger.Debug("Resource metrics ", zap.Int("count = ", md.ResourceMetrics().Len())) - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { metrics := md.ResourceMetrics().At(i) resAttr := metrics.Resource().Attributes() e.logger.Debug("Scope metrics ", zap.Int("count = ", metrics.ScopeMetrics().Len())) - for j := 0; j < metrics.ScopeMetrics().Len(); j++ { + for j := range metrics.ScopeMetrics().Len() { metricSlice := metrics.ScopeMetrics().At(j).Metrics() scopeInstr := metrics.ScopeMetrics().At(j).Scope() scopeURL := metrics.ScopeMetrics().At(j).SchemaUrl() e.logger.Debug("metrics ", zap.Int("count = ", metricSlice.Len())) - for k := 0; k < metricSlice.Len(); k++ { + for k := range metricSlice.Len() { metric := metricSlice.At(k) metricType = metric.Type() switch metric.Type() { @@ -405,7 +405,7 @@ func (e *kineticaMetricsExporter) createSummaryRecord(resAttr pcommon.Map, _ str var datapointAttribute []SummaryDataPointAttribute datapointAttributes := make(map[string]ValueTypePair) - for i := 0; i < summaryRecord.DataPoints().Len(); i++ { + for i := range summaryRecord.DataPoints().Len() { datapoint := summaryRecord.DataPoints().At(i) summaryDatapoint := &SummaryDatapoint{ SummaryID: summary.SummaryID, @@ -448,7 +448,7 @@ func (e *kineticaMetricsExporter) createSummaryRecord(resAttr pcommon.Map, _ str // Handle quantile values quantileValues := datapoint.QuantileValues() - for i := 0; i < quantileValues.Len(); i++ { + for i := range quantileValues.Len() { quantileValue := quantileValues.At(i) summaryQV := &SummaryDatapointQuantileValues{ SummaryID: summary.SummaryID, @@ -560,7 +560,7 @@ func (e *kineticaMetricsExporter) createExponentialHistogramRecord(resAttr pcomm var datapointBucketPositiveCount []ExponentialHistogramBucketPositiveCount var datapointBucketNegativeCount []ExponentialHistogramBucketNegativeCount - for i := 0; i < exponentialHistogramRecord.DataPoints().Len(); i++ { + for i := range exponentialHistogramRecord.DataPoints().Len() { datapoint := exponentialHistogramRecord.DataPoints().At(i) expHistogramDatapoint := ExponentialHistogramDatapoint{ @@ -611,7 +611,7 @@ func (e *kineticaMetricsExporter) createExponentialHistogramRecord(resAttr pcomm // Handle datapoint exemplars exemplars := datapoint.Exemplars() - for i := 0; i < exemplars.Len(); i++ { + for i := range exemplars.Len() { exemplar := exemplars.At(i) sumDatapointExemplar := ExponentialHistogramDatapointExemplar{ HistogramID: histogram.HistogramID, @@ -655,7 +655,7 @@ func (e *kineticaMetricsExporter) createExponentialHistogramRecord(resAttr pcomm } // Handle positive and negative bucket counts - for i := 0; i < datapoint.Positive().BucketCounts().Len(); i++ { + for i := range 
datapoint.Positive().BucketCounts().Len() { positiveBucketCount := datapoint.Positive().BucketCounts().At(i) datapointBucketPositiveCount = append(datapointBucketPositiveCount, ExponentialHistogramBucketPositiveCount{ HistogramID: expHistogramDatapoint.HistogramID, @@ -666,7 +666,7 @@ func (e *kineticaMetricsExporter) createExponentialHistogramRecord(resAttr pcomm } kiExpHistogramRecord.histogramBucketPositiveCount = append(kiExpHistogramRecord.histogramBucketPositiveCount, datapointBucketPositiveCount...) - for i := 0; i < datapoint.Negative().BucketCounts().Len(); i++ { + for i := range datapoint.Negative().BucketCounts().Len() { negativeBucketCount := datapoint.Negative().BucketCounts().At(i) datapointBucketNegativeCount = append(datapointBucketNegativeCount, ExponentialHistogramBucketNegativeCount{ HistogramID: expHistogramDatapoint.HistogramID, @@ -777,7 +777,7 @@ func (e *kineticaMetricsExporter) createHistogramRecord(resAttr pcommon.Map, _ s exemplarAttributes := make(map[string]ValueTypePair) // Handle data points - for i := 0; i < histogramRecord.DataPoints().Len(); i++ { + for i := range histogramRecord.DataPoints().Len() { datapoint := histogramRecord.DataPoints().At(i) histogramDatapoint := &HistogramDatapoint{ @@ -824,7 +824,7 @@ func (e *kineticaMetricsExporter) createHistogramRecord(resAttr pcommon.Map, _ s // Handle data point exemplars exemplars := datapoint.Exemplars() - for i := 0; i < exemplars.Len(); i++ { + for i := range exemplars.Len() { exemplar := exemplars.At(i) histogramDatapointExemplar := HistogramDatapointExemplar{ HistogramID: histogram.HistogramID, @@ -868,7 +868,7 @@ func (e *kineticaMetricsExporter) createHistogramRecord(resAttr pcommon.Map, _ s } histogramBucketCounts := datapoint.BucketCounts() - for i := 0; i < histogramBucketCounts.Len(); i++ { + for i := range histogramBucketCounts.Len() { bucketCount := HistogramDatapointBucketCount{ HistogramID: histogramDatapoint.HistogramID, DatapointID: histogramDatapoint.ID, @@ -879,7 +879,7 @@ func (e *kineticaMetricsExporter) createHistogramRecord(resAttr pcommon.Map, _ s } histogramExplicitBounds := datapoint.ExplicitBounds() - for i := 0; i < histogramExplicitBounds.Len(); i++ { + for i := range histogramExplicitBounds.Len() { explicitBound := HistogramDatapointExplicitBound{ HistogramID: histogramDatapoint.HistogramID, DatapointID: histogramDatapoint.ID, @@ -997,7 +997,7 @@ func (e *kineticaMetricsExporter) createSumRecord(resAttr pcommon.Map, _ string, var exemplarAttribute []SumDataPointExemplarAttribute exemplarAttributes := make(map[string]ValueTypePair) - for i := 0; i < sumRecord.DataPoints().Len(); i++ { + for i := range sumRecord.DataPoints().Len() { datapoint := sumRecord.DataPoints().At(i) sumDatapoint := SumDatapoint{ @@ -1041,7 +1041,7 @@ func (e *kineticaMetricsExporter) createSumRecord(resAttr pcommon.Map, _ string, // Handle data point exemplars exemplars := datapoint.Exemplars() - for i := 0; i < exemplars.Len(); i++ { + for i := range exemplars.Len() { exemplar := exemplars.At(i) sumDatapointExemplar := SumDatapointExemplar{ SumID: sum.SumID, @@ -1194,7 +1194,7 @@ func (e *kineticaMetricsExporter) createGaugeRecord(resAttr pcommon.Map, _ strin var exemplarAttribute []GaugeDataPointExemplarAttribute exemplarAttributes := make(map[string]ValueTypePair) - for i := 0; i < gaugeRecord.DataPoints().Len(); i++ { + for i := range gaugeRecord.DataPoints().Len() { datapoint := gaugeRecord.DataPoints().At(i) gaugeDatapoint := GaugeDatapoint{ @@ -1237,7 +1237,7 @@ func (e *kineticaMetricsExporter) 
createGaugeRecord(resAttr pcommon.Map, _ strin // Handle data point exemplars exemplars := datapoint.Exemplars() - for i := 0; i < exemplars.Len(); i++ { + for i := range exemplars.Len() { exemplar := exemplars.At(i) gaugeDatapointExemplar := GaugeDatapointExemplar{ GaugeID: gauge.GaugeID, diff --git a/exporter/loadbalancingexporter/consistent_hashing.go b/exporter/loadbalancingexporter/consistent_hashing.go index faf71d1f515e0..ce17f3ebaee2c 100644 --- a/exporter/loadbalancingexporter/consistent_hashing.go +++ b/exporter/loadbalancingexporter/consistent_hashing.go @@ -103,7 +103,7 @@ func bsearch(pos position, left []ringItem, right []ringItem) ringItem { // The slice length of the result matches the numPoints. func positionsFor(endpoint string, numPoints int) []position { res := make([]position, 0, numPoints) - for i := 0; i < numPoints; i++ { + for i := range numPoints { h := crc32.NewIEEE() h.Write([]byte(endpoint)) h.Write([]byte{byte(i)}) diff --git a/exporter/loadbalancingexporter/helpers_test.go b/exporter/loadbalancingexporter/helpers_test.go index bea11d3c213fc..e9aad01b26439 100644 --- a/exporter/loadbalancingexporter/helpers_test.go +++ b/exporter/loadbalancingexporter/helpers_test.go @@ -69,13 +69,13 @@ func benchMergeTraces(b *testing.B, tracesCount int) { traces1 := ptrace.NewTraces() traces2 := ptrace.NewTraces() - for i := 0; i < tracesCount; i++ { + for range tracesCount { appendSimpleTraceWithID(traces2.ResourceSpans().AppendEmpty(), [16]byte{1, 2, 3, 4}) } b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { mergeTraces(traces1, traces2) } } diff --git a/exporter/loadbalancingexporter/metrics_exporter.go b/exporter/loadbalancingexporter/metrics_exporter.go index fb61bd84ae7f1..43df74dff7015 100644 --- a/exporter/loadbalancingexporter/metrics_exporter.go +++ b/exporter/loadbalancingexporter/metrics_exporter.go @@ -155,7 +155,7 @@ func (e *metricExporterImp) ConsumeMetrics(ctx context.Context, md pmetric.Metri func splitMetricsByResourceServiceName(md pmetric.Metrics) (map[string]pmetric.Metrics, error) { results := map[string]pmetric.Metrics{} - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { rm := md.ResourceMetrics().At(i) svc, ok := rm.Resource().Attributes().Get(conventions.AttributeServiceName) @@ -182,7 +182,7 @@ func splitMetricsByResourceServiceName(md pmetric.Metrics) (map[string]pmetric.M func splitMetricsByResourceID(md pmetric.Metrics) map[string]pmetric.Metrics { results := map[string]pmetric.Metrics{} - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { rm := md.ResourceMetrics().At(i) newMD := pmetric.NewMetrics() @@ -204,13 +204,13 @@ func splitMetricsByResourceID(md pmetric.Metrics) map[string]pmetric.Metrics { func splitMetricsByMetricName(md pmetric.Metrics) map[string]pmetric.Metrics { results := map[string]pmetric.Metrics{} - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { rm := md.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { sm := rm.ScopeMetrics().At(j) - for k := 0; k < sm.Metrics().Len(); k++ { + for k := range sm.Metrics().Len() { m := sm.Metrics().At(k) newMD, mClone := cloneMetricWithoutType(rm, sm, m) @@ -233,15 +233,15 @@ func splitMetricsByMetricName(md pmetric.Metrics) map[string]pmetric.Metrics { func splitMetricsByStreamID(md pmetric.Metrics) map[string]pmetric.Metrics { results := map[string]pmetric.Metrics{} - for i := 
0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { rm := md.ResourceMetrics().At(i) res := rm.Resource() - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { sm := rm.ScopeMetrics().At(j) scope := sm.Scope() - for k := 0; k < sm.Metrics().Len(); k++ { + for k := range sm.Metrics().Len() { m := sm.Metrics().At(k) metricID := identity.OfResourceMetric(res, scope, m) @@ -249,7 +249,7 @@ func splitMetricsByStreamID(md pmetric.Metrics) map[string]pmetric.Metrics { case pmetric.MetricTypeGauge: gauge := m.Gauge() - for l := 0; l < gauge.DataPoints().Len(); l++ { + for l := range gauge.DataPoints().Len() { dp := gauge.DataPoints().At(l) newMD, mClone := cloneMetricWithoutType(rm, sm, m) @@ -269,7 +269,7 @@ func splitMetricsByStreamID(md pmetric.Metrics) map[string]pmetric.Metrics { case pmetric.MetricTypeSum: sum := m.Sum() - for l := 0; l < sum.DataPoints().Len(); l++ { + for l := range sum.DataPoints().Len() { dp := sum.DataPoints().At(l) newMD, mClone := cloneMetricWithoutType(rm, sm, m) @@ -291,7 +291,7 @@ func splitMetricsByStreamID(md pmetric.Metrics) map[string]pmetric.Metrics { case pmetric.MetricTypeHistogram: histogram := m.Histogram() - for l := 0; l < histogram.DataPoints().Len(); l++ { + for l := range histogram.DataPoints().Len() { dp := histogram.DataPoints().At(l) newMD, mClone := cloneMetricWithoutType(rm, sm, m) @@ -312,7 +312,7 @@ func splitMetricsByStreamID(md pmetric.Metrics) map[string]pmetric.Metrics { case pmetric.MetricTypeExponentialHistogram: expHistogram := m.ExponentialHistogram() - for l := 0; l < expHistogram.DataPoints().Len(); l++ { + for l := range expHistogram.DataPoints().Len() { dp := expHistogram.DataPoints().At(l) newMD, mClone := cloneMetricWithoutType(rm, sm, m) @@ -333,7 +333,7 @@ func splitMetricsByStreamID(md pmetric.Metrics) map[string]pmetric.Metrics { case pmetric.MetricTypeSummary: summary := m.Summary() - for l := 0; l < summary.DataPoints().Len(); l++ { + for l := range summary.DataPoints().Len() { dp := summary.DataPoints().At(l) newMD, mClone := cloneMetricWithoutType(rm, sm, m) diff --git a/exporter/loadbalancingexporter/metrics_exporter_test.go b/exporter/loadbalancingexporter/metrics_exporter_test.go index 0ec7f6f310ad3..b8cb94db6cca9 100644 --- a/exporter/loadbalancingexporter/metrics_exporter_test.go +++ b/exporter/loadbalancingexporter/metrics_exporter_test.go @@ -838,14 +838,14 @@ func randomMetrics(t require.TestingT, rmCount int, smCount int, mCount int, dpC timeStamp := pcommon.Timestamp(rand.IntN(256)) value := rand.Int64N(256) - for i := 0; i < rmCount; i++ { + for range rmCount { rm := md.ResourceMetrics().AppendEmpty() err := rm.Resource().Attributes().FromRaw(map[string]any{ conventions.AttributeServiceName: fmt.Sprintf("service-%d", rand.IntN(512)), }) require.NoError(t, err) - for j := 0; j < smCount; j++ { + for range smCount { sm := rm.ScopeMetrics().AppendEmpty() scope := sm.Scope() scope.SetName("MyTestInstrument") @@ -855,7 +855,7 @@ func randomMetrics(t require.TestingT, rmCount int, smCount int, mCount int, dpC }) require.NoError(t, err) - for k := 0; k < mCount; k++ { + for range mCount { m := sm.Metrics().AppendEmpty() m.SetName(fmt.Sprintf("metric.%d.test", rand.IntN(512))) @@ -863,7 +863,7 @@ func randomMetrics(t require.TestingT, rmCount int, smCount int, mCount int, dpC sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) sum.SetIsMonotonic(true) - for l := 0; l < dpCount; l++ { + for range dpCount { dp := 
sum.DataPoints().AppendEmpty() dp.SetTimestamp(timeStamp) @@ -893,7 +893,7 @@ func benchConsumeMetrics(b *testing.B, routingKey string, endpointsCount int, rm } endpoints := []string{} - for i := 0; i < endpointsCount; i++ { + for i := range endpointsCount { endpoints = append(endpoints, fmt.Sprintf("endpoint-%d", i)) } @@ -921,7 +921,7 @@ func benchConsumeMetrics(b *testing.B, routingKey string, endpointsCount int, rm b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { err = p.ConsumeMetrics(context.Background(), md) require.NoError(b, err) } diff --git a/exporter/loadbalancingexporter/trace_exporter.go b/exporter/loadbalancingexporter/trace_exporter.go index 608627a732f9e..afffefdb16d40 100644 --- a/exporter/loadbalancingexporter/trace_exporter.go +++ b/exporter/loadbalancingexporter/trace_exporter.go @@ -155,7 +155,7 @@ func routingIdentifiersFromTraces(td ptrace.Traces, key routingKey) (map[string] } if key == svcRouting { - for i := 0; i < rs.Len(); i++ { + for i := range rs.Len() { svc, ok := rs.At(i).Resource().Attributes().Get("service.name") if !ok { return nil, errors.New("unable to get service name") diff --git a/exporter/loadbalancingexporter/trace_exporter_test.go b/exporter/loadbalancingexporter/trace_exporter_test.go index e7fd69b5a5b07..9e6aefc9a58b0 100644 --- a/exporter/loadbalancingexporter/trace_exporter_test.go +++ b/exporter/loadbalancingexporter/trace_exporter_test.go @@ -569,7 +569,7 @@ func benchConsumeTraces(b *testing.B, endpointsCount int, tracesCount int) { } endpoints := []string{} - for i := 0; i < endpointsCount; i++ { + for i := range endpointsCount { endpoints = append(endpoints, fmt.Sprintf("endpoint-%d", i)) } @@ -594,8 +594,8 @@ func benchConsumeTraces(b *testing.B, endpointsCount int, tracesCount int) { trace1 := ptrace.NewTraces() trace2 := ptrace.NewTraces() - for i := 0; i < endpointsCount; i++ { - for j := 0; j < tracesCount/endpointsCount; j++ { + for i := range endpointsCount { + for range tracesCount / endpointsCount { appendSimpleTraceWithID(trace2.ResourceSpans().AppendEmpty(), [16]byte{1, 2, 6, byte(i)}) } } @@ -603,7 +603,7 @@ func benchConsumeTraces(b *testing.B, endpointsCount int, tracesCount int) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { err = p.ConsumeTraces(context.Background(), td) require.NoError(b, err) } diff --git a/exporter/logicmonitorexporter/internal/testutil/logs.go b/exporter/logicmonitorexporter/internal/testutil/logs.go index f894cad9ec827..3251794fb855c 100644 --- a/exporter/logicmonitorexporter/internal/testutil/logs.go +++ b/exporter/logicmonitorexporter/internal/testutil/logs.go @@ -24,7 +24,7 @@ func CreateLogData(numberOfLogs int) plog.Logs { rl.ScopeLogs().AppendEmpty() // Add an empty ScopeLogs ill := rl.ScopeLogs().AppendEmpty() - for i := 0; i < numberOfLogs; i++ { + for i := range numberOfLogs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := ill.LogRecords().AppendEmpty() logRecord.Body().SetStr("mylog") diff --git a/exporter/logicmonitorexporter/logs_exporter.go b/exporter/logicmonitorexporter/logs_exporter.go index 7c29b79597eb8..018543734e028 100644 --- a/exporter/logicmonitorexporter/logs_exporter.go +++ b/exporter/logicmonitorexporter/logs_exporter.go @@ -65,13 +65,13 @@ func (e *logExporter) PushLogData(ctx context.Context, lg plog.Logs) error { resourceLogs := lg.ResourceLogs() var payload []model.LogInput - for i := 0; i < resourceLogs.Len(); i++ { + for i := range resourceLogs.Len() { resourceLog := resourceLogs.At(i) libraryLogs := 
resourceLog.ScopeLogs() - for j := 0; j < libraryLogs.Len(); j++ { + for j := range libraryLogs.Len() { libraryLog := libraryLogs.At(j) logs := libraryLog.LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { logMetadataMap := make(map[string]any) resourceMapperMap := make(map[string]any) log := logs.At(k) diff --git a/exporter/logzioexporter/exporter.go b/exporter/logzioexporter/exporter.go index 546bb753e1a9a..520162d885841 100644 --- a/exporter/logzioexporter/exporter.go +++ b/exporter/logzioexporter/exporter.go @@ -125,13 +125,13 @@ func (exporter *logzioExporter) start(ctx context.Context, host component.Host) func (exporter *logzioExporter) pushLogData(ctx context.Context, ld plog.Logs) error { var dataBuffer bytes.Buffer resourceLogs := ld.ResourceLogs() - for i := 0; i < resourceLogs.Len(); i++ { + for i := range resourceLogs.Len() { resource := resourceLogs.At(i).Resource() scopeLogs := resourceLogs.At(i).ScopeLogs() - for j := 0; j < scopeLogs.Len(); j++ { + for j := range scopeLogs.Len() { logRecords := scopeLogs.At(j).LogRecords() scope := scopeLogs.At(j).Scope() - for k := 0; k < logRecords.Len(); k++ { + for k := range logRecords.Len() { log := logRecords.At(k) details := mergeMapEntries(resource.Attributes(), scope.Attributes(), log.Attributes()) details.PutStr(`scopeName`, scope.Name()) diff --git a/exporter/logzioexporter/exporter_test.go b/exporter/logzioexporter/exporter_test.go index 2943c50a9413f..31edc176fdefb 100644 --- a/exporter/logzioexporter/exporter_test.go +++ b/exporter/logzioexporter/exporter_test.go @@ -123,12 +123,12 @@ func testLogsExporter(t *testing.T, ld plog.Logs, cfg *Config) error { // Traces func newTestTracesWithAttributes() ptrace.Traces { td := ptrace.NewTraces() - for i := 0; i < 10; i++ { + for i := range 10 { s := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() s.SetName(fmt.Sprintf("%s-%d", testOperation, i)) s.SetTraceID(pcommon.TraceID([16]byte{byte(i), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1})) s.SetSpanID(pcommon.SpanID([8]byte{byte(i), 0, 0, 0, 0, 0, 0, 2})) - for j := 0; j < 5; j++ { + for j := range 5 { s.Attributes().PutStr(fmt.Sprintf("k%d", j), fmt.Sprintf("v%d", j)) } s.SetKind(ptrace.SpanKindServer) diff --git a/exporter/logzioexporter/internal/cache/lru_test.go b/exporter/logzioexporter/internal/cache/lru_test.go index 15487c6b797f2..369d474c7de35 100644 --- a/exporter/logzioexporter/internal/cache/lru_test.go +++ b/exporter/logzioexporter/internal/cache/lru_test.go @@ -144,7 +144,7 @@ func TestLRUCacheConcurrentAccess(*testing.T) { start := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < 20; i++ { + for range 20 { wg.Add(1) go func() { @@ -152,7 +152,7 @@ func TestLRUCacheConcurrentAccess(*testing.T) { <-start - for i := 0; i < 1000; i++ { + for range 1000 { cache.Get("A") } }() diff --git a/exporter/lokiexporter/internal/tenant/attribute.go b/exporter/lokiexporter/internal/tenant/attribute.go index dbdbc0ca9a6a4..523e4aba10eba 100644 --- a/exporter/lokiexporter/internal/tenant/attribute.go +++ b/exporter/lokiexporter/internal/tenant/attribute.go @@ -16,7 +16,7 @@ type AttributeTenantSource struct { } func (ts *AttributeTenantSource) GetTenant(_ context.Context, logs plog.Logs) (string, error) { - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rl := logs.ResourceLogs().At(i) if v, found := rl.Resource().Attributes().Get(ts.Value); found { return v.Str(), nil diff --git a/exporter/mezmoexporter/exporter.go 
b/exporter/mezmoexporter/exporter.go index 46df8e26c1740..4df49ace84c4c 100644 --- a/exporter/mezmoexporter/exporter.go +++ b/exporter/mezmoexporter/exporter.go @@ -80,15 +80,15 @@ func (m *mezmoExporter) logDataToMezmo(ld plog.Logs) error { // Convert the log resources to mezmo lines... resourceLogs := ld.ResourceLogs() - for i := 0; i < resourceLogs.Len(); i++ { + for i := range resourceLogs.Len() { resource := resourceLogs.At(i).Resource() resourceHostName, hasResourceHostName := resource.Attributes().Get("host.name") scopeLogs := resourceLogs.At(i).ScopeLogs() - for j := 0; j < scopeLogs.Len(); j++ { + for j := range scopeLogs.Len() { logs := scopeLogs.At(j).LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { log := logs.At(k) // Convert Attributes to meta fields being mindful of the maxMetaDataSize restriction diff --git a/exporter/mezmoexporter/exporter_test.go b/exporter/mezmoexporter/exporter_test.go index 1e601f2210504..c5ae1ecff00ff 100644 --- a/exporter/mezmoexporter/exporter_test.go +++ b/exporter/mezmoexporter/exporter_test.go @@ -37,7 +37,7 @@ func createSimpleLogData(numberOfLogs int) plog.Logs { rl.ScopeLogs().AppendEmpty() // Add an empty ScopeLogs sl := rl.ScopeLogs().AppendEmpty() - for i := 0; i < numberOfLogs; i++ { + for i := range numberOfLogs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStr("10byteslog") @@ -58,7 +58,7 @@ func createMinimalAttributesLogData(numberOfLogs int) plog.Logs { rl.ScopeLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() - for i := 0; i < numberOfLogs; i++ { + for range numberOfLogs { logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStr("minimal attribute log") } @@ -77,7 +77,7 @@ func createMaxLogData() plog.Logs { lineLen := maxMessageSize lineCnt := (maxBodySize / lineLen) * 2 - for i := 0; i < lineCnt; i++ { + for i := range lineCnt { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStr(randString(maxMessageSize)) diff --git a/exporter/opencensusexporter/opencensus.go b/exporter/opencensusexporter/opencensus.go index 0cedb5130ab65..7a8298deccd6b 100644 --- a/exporter/opencensusexporter/opencensus.go +++ b/exporter/opencensusexporter/opencensus.go @@ -81,7 +81,7 @@ func (oce *ocExporter) start(ctx context.Context, host component.Host) error { if oce.tracesClients != nil { oce.traceSvcClient = agenttracepb.NewTraceServiceClient(oce.grpcClientConn) // Try to create rpc clients now. - for i := 0; i < oce.cfg.NumWorkers; i++ { + for range oce.cfg.NumWorkers { // Populate the channel with NumWorkers nil RPCs to keep the number of workers // constant in the channel. oce.tracesClients <- nil @@ -91,7 +91,7 @@ func (oce *ocExporter) start(ctx context.Context, host component.Host) error { if oce.metricsClients != nil { oce.metricsSvcClient = agentmetricspb.NewMetricsServiceClient(oce.grpcClientConn) // Try to create rpc clients now. - for i := 0; i < oce.cfg.NumWorkers; i++ { + for range oce.cfg.NumWorkers { // Populate the channel with NumWorkers nil RPCs to keep the number of workers // constant in the channel. oce.metricsClients <- nil @@ -106,7 +106,7 @@ func (oce *ocExporter) shutdown(context.Context) error { } if oce.tracesClients != nil { // First remove all the clients from the channel. 
- for i := 0; i < oce.cfg.NumWorkers; i++ { + for range oce.cfg.NumWorkers { <-oce.tracesClients } // Now close the channel @@ -114,7 +114,7 @@ func (oce *ocExporter) shutdown(context.Context) error { } if oce.metricsClients != nil { // First remove all the clients from the channel. - for i := 0; i < oce.cfg.NumWorkers; i++ { + for range oce.cfg.NumWorkers { <-oce.metricsClients } // Now close the channel @@ -164,7 +164,7 @@ func (oce *ocExporter) pushTraces(_ context.Context, td ptrace.Traces) error { } rss := td.ResourceSpans() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { node, resource, spans := opencensus.ResourceSpansToOC(rss.At(i)) // This is a hack because OC protocol expects a Node for the initial message. if node == nil { @@ -213,7 +213,7 @@ func (oce *ocExporter) pushMetrics(_ context.Context, md pmetric.Metrics) error } rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { ocReq := agentmetricspb.ExportMetricsServiceRequest{} ocReq.Node, ocReq.Resource, ocReq.Metrics = opencensus.ResourceMetricsToOC(rms.At(i)) diff --git a/exporter/opencensusexporter/opencensus_test.go b/exporter/opencensusexporter/opencensus_test.go index 9e03306cf174b..6838ad8d7edf3 100644 --- a/exporter/opencensusexporter/opencensus_test.go +++ b/exporter/opencensusexporter/opencensus_test.go @@ -97,7 +97,7 @@ func TestSendTraces_NoBackend(t *testing.T) { }) td := testdata.GenerateTraces(1) - for i := 0; i < 10000; i++ { + for range 10000 { assert.Error(t, exp.ConsumeTraces(context.Background(), td)) } } @@ -194,7 +194,7 @@ func TestSendMetrics_NoBackend(t *testing.T) { }) md := testdata.GenerateMetrics(1) - for i := 0; i < 10000; i++ { + for range 10000 { assert.Error(t, exp.ConsumeMetrics(context.Background(), md)) } } diff --git a/exporter/opensearchexporter/encoder.go b/exporter/opensearchexporter/encoder.go index ec9696fed2490..3e270f31d80c2 100644 --- a/exporter/opensearchexporter/encoder.go +++ b/exporter/opensearchexporter/encoder.go @@ -168,7 +168,7 @@ func (m *encodeModel) encodeTrace( if span.Events().Len() > 0 { sso.Events = make([]ssoSpanEvent, span.Events().Len()) - for i := 0; i < span.Events().Len(); i++ { + for i := range span.Events().Len() { e := span.Events().At(i) ssoEvent := &sso.Events[i] ssoEvent.Attributes = e.Attributes().AsRaw() @@ -206,7 +206,7 @@ func (m *encodeModel) encodeTrace( if span.Links().Len() > 0 { sso.Links = make([]ssoSpanLinks, span.Links().Len()) - for i := 0; i < span.Links().Len(); i++ { + for i := range span.Links().Len() { link := span.Links().At(i) ssoLink := &sso.Links[i] ssoLink.Attributes = link.Attributes().AsRaw() diff --git a/exporter/opensearchexporter/internal/objmodel/objmodel.go b/exporter/opensearchexporter/internal/objmodel/objmodel.go index 74172bbea8b92..195597eada9ab 100644 --- a/exporter/opensearchexporter/internal/objmodel/objmodel.go +++ b/exporter/opensearchexporter/internal/objmodel/objmodel.go @@ -204,7 +204,7 @@ func (doc *Document) Dedup() { // // This step removes potential conflicts when dedotting and serializing fields. var renamed bool - for i := 0; i < len(doc.fields)-1; i++ { + for i := range len(doc.fields) - 1 { key, nextKey := doc.fields[i].key, doc.fields[i+1].key if len(key) < len(nextKey) && strings.HasPrefix(nextKey, key) && nextKey[len(key)] == '.' { renamed = true @@ -219,7 +219,7 @@ func (doc *Document) Dedup() { // // This step ensures that we do not have duplicate fields names when serializing. // OpenSearch JSON parser will fail otherwise. 
- for i := 0; i < len(doc.fields)-1; i++ { + for i := range len(doc.fields) - 1 { if doc.fields[i].key == doc.fields[i+1].key { doc.fields[i].value = ignoreValue } @@ -486,7 +486,7 @@ func arrFromAttributes(aa pcommon.Slice) []Value { } values := make([]Value, aa.Len()) - for i := 0; i < aa.Len(); i++ { + for i := range aa.Len() { values[i] = ValueFromAttribute(aa.At(i)) } return values @@ -528,7 +528,7 @@ func commonObjPrefix(a, b string) int { end = alt } - for i := 0; i < end; i++ { + for i := range end { if a[i] != b[i] { return i } diff --git a/exporter/opensearchexporter/log_bulk_indexer.go b/exporter/opensearchexporter/log_bulk_indexer.go index ccf548bb8d67c..5e316f591657f 100644 --- a/exporter/opensearchexporter/log_bulk_indexer.go +++ b/exporter/opensearchexporter/log_bulk_indexer.go @@ -125,15 +125,15 @@ func newLogOpenSearchBulkIndexer(client *opensearch.Client, onIndexerError func( func forEachLog(ld plog.Logs, visitor func(pcommon.Resource, string, pcommon.InstrumentationScope, string, plog.LogRecord)) { resourceLogs := ld.ResourceLogs() - for i := 0; i < resourceLogs.Len(); i++ { + for i := range resourceLogs.Len() { il := resourceLogs.At(i) resource := il.Resource() scopeLogs := il.ScopeLogs() - for j := 0; j < scopeLogs.Len(); j++ { + for j := range scopeLogs.Len() { scopeSpan := scopeLogs.At(j) logs := scopeLogs.At(j).LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { log := logs.At(k) visitor(resource, il.SchemaUrl(), scopeSpan.Scope(), scopeSpan.SchemaUrl(), log) } diff --git a/exporter/opensearchexporter/trace_bulk_indexer.go b/exporter/opensearchexporter/trace_bulk_indexer.go index 4e7bcccc8a866..17f0a0424116e 100644 --- a/exporter/opensearchexporter/trace_bulk_indexer.go +++ b/exporter/opensearchexporter/trace_bulk_indexer.go @@ -157,15 +157,15 @@ func newOpenSearchBulkIndexer(client *opensearch.Client, onIndexerError func(con func forEachSpan(td ptrace.Traces, visitor func(pcommon.Resource, string, pcommon.InstrumentationScope, string, ptrace.Span)) { resourceSpans := td.ResourceSpans() - for i := 0; i < resourceSpans.Len(); i++ { + for i := range resourceSpans.Len() { il := resourceSpans.At(i) resource := il.Resource() scopeSpans := il.ScopeSpans() - for j := 0; j < scopeSpans.Len(); j++ { + for j := range scopeSpans.Len() { scopeSpan := scopeSpans.At(j) spans := scopeSpans.At(j).Spans() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { span := spans.At(k) visitor(resource, il.SchemaUrl(), scopeSpan.Scope(), scopeSpan.SchemaUrl(), span) } diff --git a/exporter/otelarrowexporter/internal/arrow/bestofn.go b/exporter/otelarrowexporter/internal/arrow/bestofn.go index 317835cd69cc2..7de9292e09a9c 100644 --- a/exporter/otelarrowexporter/internal/arrow/bestofn.go +++ b/exporter/otelarrowexporter/internal/arrow/bestofn.go @@ -52,7 +52,7 @@ func newBestOfNPrioritizer(dc doneCancel, numChoices, numStreams int, lf loadFun // Limit numChoices to the number of streams. numChoices = min(numStreams, numChoices) - for i := 0; i < numStreams; i++ { + for range numStreams { ws := &streamWorkState{ maxStreamLifetime: addJitter(maxLifetime), waiters: map[int64]chan<- error{}, @@ -70,7 +70,7 @@ func newBestOfNPrioritizer(dc doneCancel, numChoices, numStreams int, lf loadFun loadFunc: lf, } - for i := 0; i < numStreams; i++ { + for range numStreams { // TODO It's not clear if/when the prioritizer can // become a bottleneck. 
go lp.run() @@ -142,11 +142,11 @@ func (lp *bestOfNPrioritizer) streamFor(_ writeItem, rnd *rand.Rand, tmp []strea } // Select numChoices at random by shifting the selection into the start // of the temporary slice. - for i := 0; i < lp.numChoices; i++ { + for i := range lp.numChoices { pick := rnd.IntN(lp.numChoices - i) tmp[i], tmp[i+pick] = tmp[i+pick], tmp[i] } - for i := 0; i < lp.numChoices; i++ { + for i := range lp.numChoices { // TODO: skip channels w/ a pending item (maybe) tmp[i].load = lp.loadFunc(tmp[i].work) } diff --git a/exporter/otelarrowexporter/internal/arrow/exporter_test.go b/exporter/otelarrowexporter/internal/arrow/exporter_test.go index 34bcfaa1f71b4..51f5c9b3b71f7 100644 --- a/exporter/otelarrowexporter/internal/arrow/exporter_test.go +++ b/exporter/otelarrowexporter/internal/arrow/exporter_test.go @@ -501,7 +501,7 @@ func TestArrowExporterStreamRace(t *testing.T) { // stream, but none will become available. Eventually the // context will be canceled and cause these goroutines to // return. - for i := 0; i < 5; i++ { + for range 5 { wg.Add(1) go func() { defer wg.Done() @@ -556,7 +556,7 @@ func TestArrowExporterStreaming(t *testing.T) { } }() - for times := 0; times < 10; times++ { + for range 10 { input := testdata.GenerateTraces(2) sent, err := tc.exporter.SendAndWait(context.Background(), input) @@ -615,7 +615,7 @@ func TestArrowExporterHeaders(t *testing.T) { } }() - for times := 0; times < 10; times++ { + for times := range 10 { input := testdata.GenerateTraces(2) if times%2 == 1 { @@ -707,7 +707,7 @@ func TestArrowExporterIsTraced(t *testing.T) { } }() - for times := 0; times < 10; times++ { + for times := range 10 { input := testdata.GenerateTraces(2) callCtx := context.Background() @@ -750,7 +750,7 @@ func TestAddJitter(t *testing.T) { require.Equal(t, time.Duration(0), addJitter(0)) // Expect no more than 5% less in each trial. - for i := 0; i < 100; i++ { + for range 100 { x := addJitter(20 * time.Minute) require.LessOrEqual(t, 19*time.Minute, x) require.Less(t, x, 20*time.Minute) @@ -929,7 +929,7 @@ func benchmarkPrioritizer(b *testing.B, numStreams int, pname PrioritizerName) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { sent, err := tc.exporter.SendAndWait(bg, input) if err != nil || !sent { b.Errorf("send failed: %v: %v", sent, err) diff --git a/exporter/otelarrowexporter/metadata_test.go b/exporter/otelarrowexporter/metadata_test.go index 175dd4b177a78..dc3806deeb68c 100644 --- a/exporter/otelarrowexporter/metadata_test.go +++ b/exporter/otelarrowexporter/metadata_test.go @@ -85,10 +85,10 @@ func TestSendTracesWithMetadata(t *testing.T) { requestCount := 3 spansPerRequest := 33 - for requestNum := 0; requestNum < requestCount; requestNum++ { + for requestNum := range requestCount { td := testdata.GenerateTraces(spansPerRequest) spans := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans() - for spanIndex := 0; spanIndex < spansPerRequest; spanIndex++ { + for spanIndex := range spansPerRequest { spans.At(spanIndex).SetName(fmt.Sprintf("%d-%d", requestNum, spanIndex)) } @@ -172,7 +172,7 @@ func TestMetadataExporterCardinalityLimit(t *testing.T) { // Ensure that initially there is no data in the receiver. 
assert.EqualValues(t, 0, rcv.requestCount.Load()) - for requestNum := 0; requestNum < cardLimit; requestNum++ { + for requestNum := range cardLimit { td := testdata.GenerateTraces(1) ctx := client.NewContext(bg, client.Info{ Metadata: client.NewMetadata(map[string][]string{ diff --git a/exporter/prometheusexporter/accumulator.go b/exporter/prometheusexporter/accumulator.go index 1a32c41e09f6e..03b8aad34d71d 100644 --- a/exporter/prometheusexporter/accumulator.go +++ b/exporter/prometheusexporter/accumulator.go @@ -63,11 +63,11 @@ func (a *lastValueAccumulator) Accumulate(rm pmetric.ResourceMetrics) (n int) { ilms := rm.ScopeMetrics() resourceAttrs := rm.Resource().Attributes() - for i := 0; i < ilms.Len(); i++ { + for i := range ilms.Len() { ilm := ilms.At(i) metrics := ilm.Metrics() - for j := 0; j < metrics.Len(); j++ { + for j := range metrics.Len() { n += a.addMetric(metrics.At(j), ilm.Scope(), resourceAttrs, now) } } @@ -99,7 +99,7 @@ func (a *lastValueAccumulator) addMetric(metric pmetric.Metric, il pcommon.Instr func (a *lastValueAccumulator) accumulateSummary(metric pmetric.Metric, il pcommon.InstrumentationScope, resourceAttrs pcommon.Map, now time.Time) (n int) { dps := metric.Summary().DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { ip := dps.At(i) signature := timeseriesSignature(il.Name(), metric, ip.Attributes(), resourceAttrs) @@ -128,7 +128,7 @@ func (a *lastValueAccumulator) accumulateSummary(metric pmetric.Metric, il pcomm func (a *lastValueAccumulator) accumulateGauge(metric pmetric.Metric, il pcommon.InstrumentationScope, resourceAttrs pcommon.Map, now time.Time) (n int) { dps := metric.Gauge().DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { ip := dps.At(i) signature := timeseriesSignature(il.Name(), metric, ip.Attributes(), resourceAttrs) @@ -174,7 +174,7 @@ func (a *lastValueAccumulator) accumulateSum(metric pmetric.Metric, il pcommon.I } dps := doubleSum.DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { ip := dps.At(i) signature := timeseriesSignature(il.Name(), metric, ip.Attributes(), resourceAttrs) @@ -226,7 +226,7 @@ func (a *lastValueAccumulator) accumulateHistogram(metric pmetric.Metric, il pco a.logger.Debug("Accumulate histogram.....") dps := histogram.DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { ip := dps.At(i) signature := timeseriesSignature(il.Name(), metric, ip.Attributes(), resourceAttrs) // uniquely identify this time series you are accumulating for @@ -368,7 +368,7 @@ func accumulateHistogramValues(prev, current, dest pmetric.HistogramDataPoint) { dest.SetSum(newer.Sum() + older.Sum()) counts := make([]uint64, newer.BucketCounts().Len()) - for i := 0; i < newer.BucketCounts().Len(); i++ { + for i := range newer.BucketCounts().Len() { counts[i] = newer.BucketCounts().At(i) + older.BucketCounts().At(i) } dest.BucketCounts().FromRaw(counts) diff --git a/exporter/prometheusexporter/accumulator_test.go b/exporter/prometheusexporter/accumulator_test.go index 693211453d3d2..16a8c0cceffb1 100644 --- a/exporter/prometheusexporter/accumulator_test.go +++ b/exporter/prometheusexporter/accumulator_test.go @@ -420,11 +420,11 @@ func TestAccumulateDeltaToCumulativeHistogram(t *testing.T) { require.Equal(t, m1.Sum()+m2.Sum(), v.Sum()) require.Equal(t, m1.Count()+m2.Count(), v.Count()) - for i := 0; i < v.BucketCounts().Len(); i++ { + for i := range v.BucketCounts().Len() { require.Equal(t, m1.BucketCounts().At(i)+m2.BucketCounts().At(i), 
v.BucketCounts().At(i)) } - for i := 0; i < v.ExplicitBounds().Len(); i++ { + for i := range v.ExplicitBounds().Len() { require.Equal(t, m2.ExplicitBounds().At(i), v.ExplicitBounds().At(i)) } }) @@ -454,11 +454,11 @@ func TestAccumulateDeltaToCumulativeHistogram(t *testing.T) { require.Equal(t, m1.Sum(), v.Sum()) require.Equal(t, m1.Count(), v.Count()) - for i := 0; i < v.BucketCounts().Len(); i++ { + for i := range v.BucketCounts().Len() { require.Equal(t, m1.BucketCounts().At(i), v.BucketCounts().At(i)) } - for i := 0; i < v.ExplicitBounds().Len(); i++ { + for i := range v.ExplicitBounds().Len() { require.Equal(t, m1.ExplicitBounds().At(i), v.ExplicitBounds().At(i)) } }) @@ -488,11 +488,11 @@ func TestAccumulateDeltaToCumulativeHistogram(t *testing.T) { require.Equal(t, m2.Sum(), v.Sum()) require.Equal(t, m2.Count(), v.Count()) - for i := 0; i < v.BucketCounts().Len(); i++ { + for i := range v.BucketCounts().Len() { require.Equal(t, m2.BucketCounts().At(i), v.BucketCounts().At(i)) } - for i := 0; i < v.ExplicitBounds().Len(); i++ { + for i := range v.ExplicitBounds().Len() { require.Equal(t, m2.ExplicitBounds().At(i), v.ExplicitBounds().At(i)) } }) @@ -522,11 +522,11 @@ func TestAccumulateDeltaToCumulativeHistogram(t *testing.T) { require.Equal(t, m1.Sum(), v.Sum()) require.Equal(t, m1.Count(), v.Count()) - for i := 0; i < v.BucketCounts().Len(); i++ { + for i := range v.BucketCounts().Len() { require.Equal(t, m1.BucketCounts().At(i), v.BucketCounts().At(i)) } - for i := 0; i < v.ExplicitBounds().Len(); i++ { + for i := range v.ExplicitBounds().Len() { require.Equal(t, m1.ExplicitBounds().At(i), v.ExplicitBounds().At(i)) } }) @@ -556,11 +556,11 @@ func TestAccumulateDeltaToCumulativeHistogram(t *testing.T) { require.Equal(t, m2.Sum(), v.Sum()) require.Equal(t, m2.Count(), v.Count()) - for i := 0; i < v.BucketCounts().Len(); i++ { + for i := range v.BucketCounts().Len() { require.Equal(t, m2.BucketCounts().At(i), v.BucketCounts().At(i)) } - for i := 0; i < v.ExplicitBounds().Len(); i++ { + for i := range v.ExplicitBounds().Len() { require.Equal(t, m2.ExplicitBounds().At(i), v.ExplicitBounds().At(i)) } }) diff --git a/exporter/prometheusexporter/collector.go b/exporter/prometheusexporter/collector.go index dd3891095473e..e514bad6457c4 100644 --- a/exporter/prometheusexporter/collector.go +++ b/exporter/prometheusexporter/collector.go @@ -57,7 +57,7 @@ func convertExemplars(exemplars pmetric.ExemplarSlice) []prometheus.Exemplar { length := exemplars.Len() result := make([]prometheus.Exemplar, length) - for i := 0; i < length; i++ { + for i := range length { e := exemplars.At(i) exemplarLabels := make(prometheus.Labels, 0) @@ -231,7 +231,7 @@ func (c *collector) convertSummary(metric pmetric.Metric, resourceAttrs pcommon. quantiles := make(map[float64]float64) qv := point.QuantileValues() - for j := 0; j < qv.Len(); j++ { + for j := range qv.Len() { qvj := qv.At(j) // There should be EXACTLY one quantile value lest it is an invalid exposition. 
quantiles[qvj.Quantile()] = qvj.Value() @@ -265,7 +265,7 @@ func (c *collector) convertDoubleHistogram(metric pmetric.Metric, resourceAttrs indicesMap := make(map[float64]int) buckets := make([]float64, 0, ip.BucketCounts().Len()) - for index := 0; index < ip.ExplicitBounds().Len(); index++ { + for index := range ip.ExplicitBounds().Len() { bucket := ip.ExplicitBounds().At(index) if _, added := indicesMap[bucket]; !added { indicesMap[bucket] = index diff --git a/exporter/prometheusexporter/prometheus.go b/exporter/prometheusexporter/prometheus.go index 8de4fa367e880..61d1cb2df36ff 100644 --- a/exporter/prometheusexporter/prometheus.go +++ b/exporter/prometheusexporter/prometheus.go @@ -83,7 +83,7 @@ func (pe *prometheusExporter) Start(ctx context.Context, host component.Host) er func (pe *prometheusExporter) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { n := 0 rmetrics := md.ResourceMetrics() - for i := 0; i < rmetrics.Len(); i++ { + for i := range rmetrics.Len() { n += pe.collector.processMetrics(rmetrics.At(i)) } diff --git a/exporter/prometheusexporter/prometheus_test.go b/exporter/prometheusexporter/prometheus_test.go index 563f7803eaffc..55f949b215b6a 100644 --- a/exporter/prometheusexporter/prometheus_test.go +++ b/exporter/prometheusexporter/prometheus_test.go @@ -63,7 +63,7 @@ func TestPrometheusExporter(t *testing.T) { set := exportertest.NewNopSettings() for _, tt := range tests { // Run it a few times to ensure that shutdowns exit cleanly. - for j := 0; j < 3; j++ { + for range 3 { exp, err := factory.CreateMetrics(context.Background(), set, tt.config) if tt.wantErr != "" { diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index 6829bb57201c8..9d1f4b0d1bff4 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -277,7 +277,7 @@ func (prwe *prwExporter) export(ctx context.Context, requests []*prompb.WriteReq var errs error // Run concurrencyLimit of workers until there // is no more requests to execute in the input channel. 
- for i := 0; i < concurrencyLimit; i++ { + for range concurrencyLimit { go func() { defer wg.Done() for { diff --git a/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go b/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go index 5cd98bc458e78..3705cedda08d6 100644 --- a/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go @@ -34,10 +34,10 @@ func Test_PushMetricsConcurrent(t *testing.T) { n := 1000 ms := make([]pmetric.Metrics, n) testIDKey := "test_id" - for i := 0; i < n; i++ { + for i := range n { m := testdata.GenerateMetricsOneMetric() dps := m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints() - for j := 0; j < dps.Len(); j++ { + for j := range dps.Len() { dp := dps.At(j) dp.Attributes().PutInt(testIDKey, int64(i)) } diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 355d0dc808455..b6acf37a33fe8 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -239,7 +239,7 @@ func Test_Shutdown(t *testing.T) { err := prwe.Shutdown(context.Background()) require.NoError(t, err) errChan := make(chan error, 5) - for i := 0; i < 5; i++ { + for range 5 { wg.Add(1) go func() { defer wg.Done() @@ -1194,7 +1194,7 @@ func benchmarkExecute(b *testing.B, numSample int) { generateSamples := func(n int) []prompb.Sample { samples := make([]prompb.Sample, 0, n) - for i := 0; i < n; i++ { + for i := range n { samples = append(samples, prompb.Sample{ Timestamp: int64(i), Value: float64(i), @@ -1205,7 +1205,7 @@ func benchmarkExecute(b *testing.B, numSample int) { generateHistograms := func(n int) []prompb.Histogram { histograms := make([]prompb.Histogram, 0, n) - for i := 0; i < n; i++ { + for i := range n { histograms = append(histograms, prompb.Histogram{ Timestamp: int64(i), Count: &prompb.Histogram_CountInt{CountInt: uint64(i)}, @@ -1217,7 +1217,7 @@ func benchmarkExecute(b *testing.B, numSample int) { reqs := make([]*prompb.WriteRequest, 0, b.N) const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好DzieÅ„dobry9Zd8ra765v4stvuyte" - for n := 0; n < b.N; n++ { + for n := range b.N { num := strings.Repeat(strconv.Itoa(n), 16) req := &prompb.WriteRequest{ Metadata: []prompb.MetricMetadata{ @@ -1308,13 +1308,13 @@ func benchmarkPushMetrics(b *testing.B, numMetrics, numConsumers int) { require.NoError(b, err) var metrics []pmetric.Metrics - for n := 0; n < b.N; n++ { + for n := range b.N { actualNumMetrics := numMetrics if numMetrics == -1 { actualNumMetrics = int(math.Pow(10, float64(n%4+1))) } m := testdata.GenerateMetricsManyMetricsSameResource(actualNumMetrics) - for i := 0; i < m.MetricCount(); i++ { + for i := range m.MetricCount() { dp := m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(i).Sum().DataPoints().AppendEmpty() dp.SetIntValue(int64(i)) // We add a random key to the attributes to ensure that we create a new time series during translation for each metric. 
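For reference, the rewrites in these hunks rely on the range-over-int form added in Go 1.22: for i := range n iterates i = 0, 1, ..., n-1, and for range n drops the index entirely when the body never uses it (as in the for range concurrencyLimit worker loop above). A minimal self-contained sketch of both forms; the package and print statements below are hypothetical and not part of this diff:

package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < 3; i++ { ... }
	for i := range 3 {
		fmt.Println("index:", i) // prints 0, 1, 2
	}

	// When the index is unused, the variable is dropped, which is
	// what conversions like "for range concurrencyLimit" do.
	for range 3 {
		fmt.Println("tick")
	}
}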
diff --git a/exporter/prometheusremotewriteexporter/helper_test.go b/exporter/prometheusremotewriteexporter/helper_test.go index 46a61735a1c71..ad2a38b18907f 100644 --- a/exporter/prometheusremotewriteexporter/helper_test.go +++ b/exporter/prometheusremotewriteexporter/helper_test.go @@ -89,7 +89,7 @@ func Test_batchTimeSeriesUpdatesStateForLargeBatches(t *testing.T) { // Benchmark for large data sizes // First allocate 100k time series tsArray := make([]*prompb.TimeSeries, 0, 100000) - for i := 0; i < 100000; i++ { + for range 100000 { ts := getTimeSeries(labels, sample1, sample2, sample3) tsArray = append(tsArray, ts) } @@ -119,7 +119,7 @@ func Benchmark_batchTimeSeries(b *testing.B) { // Benchmark for large data sizes // First allocate 100k time series tsArray := make([]*prompb.TimeSeries, 0, 100000) - for i := 0; i < 100000; i++ { + for range 100000 { ts := getTimeSeries(labels, sample1, sample2, sample3) tsArray = append(tsArray, ts) } @@ -131,7 +131,7 @@ func Benchmark_batchTimeSeries(b *testing.B) { state := newBatchTimeServicesState() // Run batchTimeSeries 100 times with a 1mb max request size - for i := 0; i < b.N; i++ { + for range b.N { requests, err := batchTimeSeries(tsMap1, 1000000, nil, state) assert.NoError(b, err) assert.Len(b, requests, 18) @@ -240,7 +240,7 @@ func TestEnsureTimeseriesPointsAreSortedByTimestamp(t *testing.T) { for ti, ts := range got.Timeseries { for i := range ts.Samples { si := ts.Samples[i] - for j := 0; j < i; j++ { + for j := range i { sj := ts.Samples[j] assert.LessOrEqual(t, sj.Timestamp, si.Timestamp, "Timeseries[%d]: Sample[%d].Timestamp(%d) > Sample[%d].Timestamp(%d)", ti, j, sj.Timestamp, i, si.Timestamp) diff --git a/exporter/prometheusremotewriteexporter/testutil_test.go b/exporter/prometheusremotewriteexporter/testutil_test.go index 02e1167164c32..b790d44b67713 100644 --- a/exporter/prometheusremotewriteexporter/testutil_test.go +++ b/exporter/prometheusremotewriteexporter/testutil_test.go @@ -177,7 +177,7 @@ func getMetricsFromMetricList(metricList ...pmetric.Metric) pmetric.Metrics { rm := metrics.ResourceMetrics().AppendEmpty() ilm := rm.ScopeMetrics().AppendEmpty() ilm.Metrics().EnsureCapacity(len(metricList)) - for i := 0; i < len(metricList); i++ { + for i := range metricList { metricList[i].CopyTo(ilm.Metrics().AppendEmpty()) } @@ -376,7 +376,7 @@ func getQuantiles(bounds []float64, values []float64) pmetric.SummaryDataPointVa quantiles := pmetric.NewSummaryDataPointValueAtQuantileSlice() quantiles.EnsureCapacity(len(bounds)) - for i := 0; i < len(bounds); i++ { + for i := range bounds { quantile := quantiles.AppendEmpty() quantile.SetQuantile(bounds[i]) quantile.SetValue(values[i]) diff --git a/exporter/prometheusremotewriteexporter/wal.go b/exporter/prometheusremotewriteexporter/wal.go index 4f0ebea5e25b0..6e4cada37bd8b 100644 --- a/exporter/prometheusremotewriteexporter/wal.go +++ b/exporter/prometheusremotewriteexporter/wal.go @@ -328,7 +328,7 @@ func (prweWAL *prweWAL) persistToWAL(requests []*prompb.WriteRequest) error { func (prweWAL *prweWAL) readPrompbFromWAL(ctx context.Context, index uint64) (wreq *prompb.WriteRequest, err error) { var protoBlob []byte - for i := 0; i < 12; i++ { + for range 12 { // Firstly check if we've been terminated, then exit if so. 
select { case <-ctx.Done(): diff --git a/exporter/prometheusremotewriteexporter/wal_test.go b/exporter/prometheusremotewriteexporter/wal_test.go index 97c40b381ba99..8e7f6497a5824 100644 --- a/exporter/prometheusremotewriteexporter/wal_test.go +++ b/exporter/prometheusremotewriteexporter/wal_test.go @@ -93,7 +93,7 @@ func TestWALStopManyTimes(t *testing.T) { // Ensure that invoking .stop() multiple times doesn't cause a panic, but actually // First close should NOT return an error. require.NoError(t, pwal.stop()) - for i := 0; i < 4; i++ { + for range 4 { // Every invocation to .stop() should return an errAlreadyClosed. require.ErrorIs(t, pwal.stop(), errAlreadyClosed) } diff --git a/exporter/sapmexporter/exporter_test.go b/exporter/sapmexporter/exporter_test.go index 16aec900eb096..60ecf752b3ad2 100644 --- a/exporter/sapmexporter/exporter_test.go +++ b/exporter/sapmexporter/exporter_test.go @@ -51,7 +51,7 @@ func buildTestTraces(setTokenLabel bool) (traces ptrace.Traces) { rss := traces.ResourceSpans() rss.EnsureCapacity(20) - for i := 0; i < 20; i++ { + for i := range 20 { rs := rss.AppendEmpty() resource := rs.Resource() resource.Attributes().PutStr("key1", "value1") @@ -118,7 +118,7 @@ func hasToken(batches []*model.Batch) bool { func buildTestTrace() (ptrace.Traces, error) { trace := ptrace.NewTraces() trace.ResourceSpans().EnsureCapacity(2) - for i := 0; i < 2; i++ { + for i := range 2 { rs := trace.ResourceSpans().AppendEmpty() resource := rs.Resource() resource.Attributes().PutStr("com.splunk.signalfx.access_token", fmt.Sprintf("TraceAccessToken%v", i)) diff --git a/exporter/sentryexporter/sentry_exporter.go b/exporter/sentryexporter/sentry_exporter.go index 4309971730895..db9f7021591f6 100644 --- a/exporter/sentryexporter/sentry_exporter.go +++ b/exporter/sentryexporter/sentry_exporter.go @@ -90,17 +90,17 @@ func (s *SentryExporter) pushTraceData(_ context.Context, td ptrace.Traces) erro // Maps root span id to a transaction. transactionMap := make(map[sentry.SpanID]*sentry.Event) - for i := 0; i < resourceSpans.Len(); i++ { + for i := range resourceSpans.Len() { rs := resourceSpans.At(i) resourceTags := generateTagsFromResource(rs.Resource()) ilss := rs.ScopeSpans() - for j := 0; j < ilss.Len(); j++ { + for j := range ilss.Len() { ils := ilss.At(j) library := ils.Scope() spans := ils.Spans() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { otelSpan := spans.At(k) sentrySpan := convertToSentrySpan(otelSpan, library, resourceTags) convertEventsToSentryExceptions(&exceptionEvents, otelSpan.Events(), sentrySpan) @@ -161,7 +161,7 @@ func generateTransactions(transactionMap map[sentry.SpanID]*sentry.Event, orphan // convertEventsToSentryExceptions creates a set of sentry events from exception events present in spans. 
// These events are stored in a mutated eventList func convertEventsToSentryExceptions(eventList *[]*sentry.Event, events ptrace.SpanEventSlice, sentrySpan *sentry.Span) { - for i := 0; i < events.Len(); i++ { + for i := range events.Len() { event := events.At(i) if event.Name() != "exception" { continue diff --git a/exporter/signalfxexporter/eventclient.go b/exporter/signalfxexporter/eventclient.go index c6471e602d5b8..b554825b2cec0 100644 --- a/exporter/signalfxexporter/eventclient.go +++ b/exporter/signalfxexporter/eventclient.go @@ -39,10 +39,10 @@ func (s *sfxEventClient) pushLogsData(ctx context.Context, ld plog.Logs) (int, e var sfxEvents []*sfxpb.Event numDroppedLogRecords := 0 - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) ills := rl.ScopeLogs() - for j := 0; j < ills.Len(); j++ { + for j := range ills.Len() { sl := ills.At(j) events, dropped := translation.LogRecordSliceToSignalFxV2(s.logger, sl.LogRecords(), rl.Resource().Attributes()) sfxEvents = append(sfxEvents, events...) diff --git a/exporter/signalfxexporter/exporter_test.go b/exporter/signalfxexporter/exporter_test.go index 963c49de69d51..6199cd8a3f6d7 100644 --- a/exporter/signalfxexporter/exporter_test.go +++ b/exporter/signalfxexporter/exporter_test.go @@ -1048,7 +1048,7 @@ func generateLargeDPBatch() pmetric.Metrics { md.ResourceMetrics().EnsureCapacity(6500) ts := time.Now() - for i := 0; i < 6500; i++ { + for i := range 6500 { rm := md.ResourceMetrics().AppendEmpty() ilm := rm.ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() @@ -1072,7 +1072,7 @@ func generateLargeEventBatch() plog.Logs { batchSize := 65000 logs.EnsureCapacity(batchSize) ts := time.Now() - for i := 0; i < batchSize; i++ { + for range batchSize { lr := logs.AppendEmpty() lr.Attributes().PutStr("k0", "k1") lr.Attributes().PutEmpty("com.splunk.signalfx.event_category") @@ -1449,7 +1449,7 @@ func BenchmarkExporterConsumeData(b *testing.B) { batchSize := 1000 metrics := pmetric.NewMetrics() tmd := testMetricsData(false) - for i := 0; i < batchSize; i++ { + for range batchSize { tmd.ResourceMetrics().At(0).CopyTo(metrics.ResourceMetrics().AppendEmpty()) } @@ -1477,7 +1477,7 @@ func BenchmarkExporterConsumeData(b *testing.B) { converter: c, } - for i := 0; i < b.N; i++ { + for range b.N { numDroppedTimeSeries, err := dpClient.pushMetricsData(context.Background(), metrics) assert.NoError(b, err) assert.Equal(b, 0, numDroppedTimeSeries) @@ -1665,7 +1665,7 @@ func TestDefaultSystemCPUTimeExcludedAndTranslated(t *testing.T) { m.SetName("system.cpu.time") sum := m.SetEmptySum() for _, state := range []string{"idle", "interrupt", "nice", "softirq", "steal", "system", "user", "wait"} { - for cpu := 0; cpu < 32; cpu++ { + for cpu := range 32 { dp := sum.DataPoints().AppendEmpty() dp.SetDoubleValue(0) dp.Attributes().PutStr("cpu", fmt.Sprintf("%d", cpu)) @@ -1814,7 +1814,7 @@ func BenchmarkExporterConsumeDataWithOTLPHistograms(b *testing.B) { batchSize := 1000 metrics := pmetric.NewMetrics() tmd := testMetricsData(true) - for i := 0; i < batchSize; i++ { + for range batchSize { tmd.ResourceMetrics().At(0).CopyTo(metrics.ResourceMetrics().AppendEmpty()) } @@ -1842,7 +1842,7 @@ func BenchmarkExporterConsumeDataWithOTLPHistograms(b *testing.B) { converter: c, } - for i := 0; i < b.N; i++ { + for range b.N { numDroppedTimeSeries, err := dpClient.pushMetricsData(context.Background(), metrics) assert.NoError(b, err) assert.Equal(b, 0, numDroppedTimeSeries) @@ -2104,7 +2104,7 @@ func generateLargeMixedDPBatch() pmetric.Metrics 
{ md.ResourceMetrics().EnsureCapacity(7500) ts := pcommon.NewTimestampFromTime(time.Now()) - for i := 0; i < 7500; i++ { + for i := range 7500 { rm := md.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().PutStr("kr0", "vr0") ilm := rm.ScopeMetrics().AppendEmpty() diff --git a/exporter/signalfxexporter/factory_test.go b/exporter/signalfxexporter/factory_test.go index 4c120f2c505e5..67614a3c64006 100644 --- a/exporter/signalfxexporter/factory_test.go +++ b/exporter/signalfxexporter/factory_test.go @@ -621,7 +621,7 @@ func BenchmarkMetricConversion(b *testing.B) { metrics, err := unmarshaller.UnmarshalMetrics(bytes) require.NoError(b, err) - for n := 0; n < b.N; n++ { + for range b.N { translated := c.MetricsToSignalFxV2(metrics) require.NotNil(b, translated) } @@ -677,7 +677,7 @@ func buildHistogram(im pmetric.Metric, name string, timestamp pcommon.Timestamp, idps := im.Histogram().DataPoints() idps.EnsureCapacity(dpCount) - for i := 0; i < dpCount; i++ { + for range dpCount { dp := idps.AppendEmpty() buildHistogramDP(dp, timestamp) } diff --git a/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go b/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go index 9bf88543d2ce2..abbc603401e88 100644 --- a/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go +++ b/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go @@ -123,7 +123,7 @@ func (a *ActiveServiceTracker) ProcessTraces(_ context.Context, traces ptrace.Tr // Take current time once since this is a system call. now := a.timeNow() - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { a.processEnvironment(traces.ResourceSpans().At(i).Resource(), now) a.processService(traces.ResourceSpans().At(i).Resource(), now) } diff --git a/exporter/signalfxexporter/internal/dimensions/dimclient_test.go b/exporter/signalfxexporter/internal/dimensions/dimclient_test.go index e82fbb9dfcab9..1b9e73e68ada9 100644 --- a/exporter/signalfxexporter/internal/dimensions/dimclient_test.go +++ b/exporter/signalfxexporter/internal/dimensions/dimclient_test.go @@ -392,7 +392,7 @@ func TestFlappyUpdates(t *testing.T) { defer client.Shutdown() // Do some flappy updates - for i := 0; i < 5; i++ { + for i := range 5 { require.NoError(t, client.acceptDimension(&DimensionUpdate{ Name: "pod_uid", Value: "abcd", diff --git a/exporter/signalfxexporter/internal/translation/converter.go b/exporter/signalfxexporter/internal/translation/converter.go index d34ad928096fa..eea1b8a62aeae 100644 --- a/exporter/signalfxexporter/internal/translation/converter.go +++ b/exporter/signalfxexporter/internal/translation/converter.go @@ -79,14 +79,14 @@ func (c *MetricsConverter) Start() { func (c *MetricsConverter) MetricsToSignalFxV2(md pmetric.Metrics) []*sfxpb.DataPoint { var sfxDataPoints []*sfxpb.DataPoint rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { rm := rms.At(i) extraDimensions := resourceToDimensions(rm.Resource()) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) var initialDps []*sfxpb.DataPoint - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { currentMetric := ilm.Metrics().At(k) dps := c.translator.FromMetric(currentMetric, extraDimensions, c.dropHistogramBuckets, c.processHistograms) initialDps = append(initialDps, dps...) 
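One semantic nuance behind these conversions, sketched standalone (hypothetical code, not from the diff): the integer operand of a range loop is evaluated once before the first iteration, whereas the classic i < x.Len() condition is re-evaluated on every pass. The two forms are interchangeable only when the bound cannot change mid-loop, which is the property the length-bounded pdata loops in this patch appear to rely on:

package main

import "fmt"

func main() {
	n := 3
	// n is read once here; growing it inside the body does not add iterations.
	for i := range n {
		n++
		fmt.Println(i, n) // runs exactly 3 times: i = 0, 1, 2
	}
}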
diff --git a/exporter/signalfxexporter/internal/translation/converter_test.go b/exporter/signalfxexporter/internal/translation/converter_test.go index ce1c7edaa7570..8f4d0fa6e2401 100644 --- a/exporter/signalfxexporter/internal/translation/converter_test.go +++ b/exporter/signalfxexporter/internal/translation/converter_test.go @@ -1187,7 +1187,7 @@ func TestInvalidNumberOfDimensions(t *testing.T) { m.SetName("valid") dp := m.SetEmptyGauge().DataPoints().AppendEmpty() dp.SetIntValue(123) - for i := 0; i < 10; i++ { + for i := range 10 { dp.Attributes().PutStr(fmt.Sprint("dim_key_", i), fmt.Sprint("dim_val_", i)) } c, err := NewMetricsConverter(logger, nil, nil, nil, "_-.", false, true) @@ -1208,7 +1208,7 @@ func TestInvalidNumberOfDimensions(t *testing.T) { MetricType: &gaugeType, Dimensions: make([]*sfxpb.Dimension, 0, 37), } - for i := 0; i < 37; i++ { + for i := range 37 { dpInvalid.Attributes().PutStr(fmt.Sprint("dim_key_", i), fmt.Sprint("dim_val_", i)) dpSFX.Dimensions = append(dpSFX.Dimensions, &sfxpb.Dimension{ Key: fmt.Sprint("dim_key_", i), diff --git a/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2.go b/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2.go index 70a6c31344f72..276d4f9fe3225 100644 --- a/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2.go +++ b/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2.go @@ -22,7 +22,7 @@ func LogRecordSliceToSignalFxV2( events := make([]*sfxpb.Event, 0, logs.Len()) numDroppedLogRecords := 0 - for i := 0; i < logs.Len(); i++ { + for i := range logs.Len() { lr := logs.At(i) event, ok := convertLogRecord(lr, resourceAttrs, logger) if !ok { diff --git a/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2_test.go b/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2_test.go index 24652373341c5..e1824b387b1df 100644 --- a/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2_test.go +++ b/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2_test.go @@ -168,7 +168,7 @@ func mapToEventProps(m map[string]any) []*sfxpb.Property { func buildNDimensions(n uint) []*sfxpb.Dimension { d := make([]*sfxpb.Dimension, 0, n) - for i := uint(0); i < n; i++ { + for i := range n { idx := int(i) suffix := strconv.Itoa(idx) d = append(d, &sfxpb.Dimension{ diff --git a/exporter/signalfxexporter/internal/utils/histogram_utils.go b/exporter/signalfxexporter/internal/utils/histogram_utils.go index 42f520237c790..5651a7c492143 100644 --- a/exporter/signalfxexporter/internal/utils/histogram_utils.go +++ b/exporter/signalfxexporter/internal/utils/histogram_utils.go @@ -27,7 +27,7 @@ func removeAccessToken(dest pmetric.ResourceMetrics) { // Lastly, the scope metric at index 1 has two Histogram type metric which can be found at index 0 and 2. func matchedHistogramResourceMetrics(md pmetric.Metrics) (matchedRMIdx map[int]map[int][]int) { rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { rm := rms.At(i) matchedSMIdx := matchedHistogramScopeMetrics(rm) if len(matchedSMIdx) > 0 { @@ -48,7 +48,7 @@ func matchedHistogramResourceMetrics(md pmetric.Metrics) (matchedRMIdx map[int]m // And that the scope metric at index 1 has two Histogram type metric which can be found at index 0 and 2. 
func matchedHistogramScopeMetrics(rm pmetric.ResourceMetrics) (matchedSMIdx map[int][]int) { ilms := rm.ScopeMetrics() - for i := 0; i < ilms.Len(); i++ { + for i := range ilms.Len() { ilm := ilms.At(i) matchedMetricsIdx := matchedHistogramMetrics(ilm) if len(matchedMetricsIdx) > 0 { @@ -67,7 +67,7 @@ func matchedHistogramScopeMetrics(rm pmetric.ResourceMetrics) (matchedSMIdx map[ // The above output can be interpreted as input scope metric has Histogram type metric at index 0 and 2. func matchedHistogramMetrics(ilm pmetric.ScopeMetrics) (matchedMetricsIdx []int) { ms := ilm.Metrics() - for i := 0; i < ms.Len(); i++ { + for i := range ms.Len() { metric := ms.At(i) if metric.Type() == pmetric.MetricTypeHistogram { matchedMetricsIdx = append(matchedMetricsIdx, i) diff --git a/exporter/signalfxexporter/internal/utils/histogram_utils_test.go b/exporter/signalfxexporter/internal/utils/histogram_utils_test.go index 3a2426968ee32..47134f6de1e56 100644 --- a/exporter/signalfxexporter/internal/utils/histogram_utils_test.go +++ b/exporter/signalfxexporter/internal/utils/histogram_utils_test.go @@ -53,7 +53,7 @@ func buildHistogram(im pmetric.Metric, name string, timestamp pcommon.Timestamp, idps := im.Histogram().DataPoints() idps.EnsureCapacity(dpCount) - for i := 0; i < dpCount; i++ { + for range dpCount { dp := idps.AppendEmpty() buildHistogramDP(dp, timestamp) } @@ -64,7 +64,7 @@ func buildGauge(im pmetric.Metric, name string, timestamp pcommon.Timestamp, dpC idps := im.Gauge().DataPoints() idps.EnsureCapacity(dpCount) - for i := 0; i < dpCount; i++ { + for range dpCount { dp := idps.AppendEmpty() dp.SetTimestamp(timestamp) dp.SetDoubleValue(1000) @@ -77,7 +77,7 @@ func buildSum(im pmetric.Metric, name string, timestamp pcommon.Timestamp, dpCou idps := im.Sum().DataPoints() idps.EnsureCapacity(dpCount) - for i := 0; i < dpCount; i++ { + for range dpCount { dp := idps.AppendEmpty() dp.SetStartTimestamp(timestamp) dp.SetTimestamp(timestamp) diff --git a/exporter/splunkhecexporter/batchperscope.go b/exporter/splunkhecexporter/batchperscope.go index 388ad525f84ac..b3f6f43e90a6d 100644 --- a/exporter/splunkhecexporter/batchperscope.go +++ b/exporter/splunkhecexporter/batchperscope.go @@ -29,9 +29,9 @@ func (rb *perScopeBatcher) ConsumeLogs(ctx context.Context, logs plog.Logs) erro var profilingFound bool var otherLogsFound bool - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rs := logs.ResourceLogs().At(i) - for j := 0; j < rs.ScopeLogs().Len(); j++ { + for j := range rs.ScopeLogs().Len() { if isProfilingData(rs.ScopeLogs().At(j)) { profilingFound = true } else { @@ -63,11 +63,11 @@ func (rb *perScopeBatcher) ConsumeLogs(ctx context.Context, logs plog.Logs) erro profilingLogs := plog.NewLogs() otherLogs := plog.NewLogs() - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rs := logs.ResourceLogs().At(i) profilingFound = false otherLogsFound = false - for j := 0; j < rs.ScopeLogs().Len(); j++ { + for j := range rs.ScopeLogs().Len() { sl := rs.ScopeLogs().At(j) if isProfilingData(sl) { profilingFound = true @@ -108,7 +108,7 @@ func (rb *perScopeBatcher) ConsumeLogs(ctx context.Context, logs plog.Logs) erro func copyResourceLogs(src plog.ResourceLogs, dest plog.ResourceLogs, isProfiling bool) { src.Resource().CopyTo(dest.Resource()) - for j := 0; j < src.ScopeLogs().Len(); j++ { + for j := range src.ScopeLogs().Len() { sl := src.ScopeLogs().At(j) if isProfilingData(sl) == isProfiling { 
sl.CopyTo(dest.ScopeLogs().AppendEmpty()) diff --git a/exporter/splunkhecexporter/client.go b/exporter/splunkhecexporter/client.go index 70e2e11bb6598..32e89b7bbd8ad 100644 --- a/exporter/splunkhecexporter/client.go +++ b/exporter/splunkhecexporter/client.go @@ -139,7 +139,7 @@ func (c *client) pushLogData(ctx context.Context, ld plog.Logs) error { // All logs in a batch have only one type (regular or profiling logs) after perScopeBatcher, // so we can just check the first one. - for i := 0; i < ld.ResourceLogs().Len(); i++ { + for i := range ld.ResourceLogs().Len() { sls := ld.ResourceLogs().At(i).ScopeLogs() if sls.Len() > 0 { if isProfilingData(sls.At(0)) { @@ -385,11 +385,11 @@ func (c *client) pushMultiMetricsDataInBatches(ctx context.Context, md pmetric.M var permanentErrors []error var events []*splunk.Event - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { rm := md.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { sm := rm.ScopeMetrics().At(j) - for k := 0; k < sm.Metrics().Len(); k++ { + for k := range sm.Metrics().Len() { metric := sm.Metrics().At(k) // Parsing metric record to Splunk event. diff --git a/exporter/splunkhecexporter/client_test.go b/exporter/splunkhecexporter/client_test.go index 549c41efca3c6..8d716d58defdb 100644 --- a/exporter/splunkhecexporter/client_test.go +++ b/exporter/splunkhecexporter/client_test.go @@ -76,11 +76,11 @@ func createMetricsData(resourcesNum, dataPointsNum int) pmetric.Metrics { doubleVal := 1234.5678 metrics := pmetric.NewMetrics() - for i := 0; i < resourcesNum; i++ { + for i := range resourcesNum { rm := metrics.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().PutStr("k0", fmt.Sprintf("v%d", i)) rm.Resource().Attributes().PutStr("k1", "v1") - for j := 0; j < dataPointsNum; j++ { + for j := range dataPointsNum { count := i*dataPointsNum + j tsUnix := time.Unix(int64(count), int64(count)*time.Millisecond.Nanoseconds()) ilm := rm.ScopeMetrics().AppendEmpty() @@ -103,11 +103,11 @@ func createTraceData(resourcesNum int, spansNum int) ptrace.Traces { traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() - for i := 0; i < resourcesNum; i++ { + for i := range resourcesNum { rs.Resource().Attributes().PutStr("resource", fmt.Sprintf("R%d", i)) ils := rs.ScopeSpans().AppendEmpty() ils.Spans().EnsureCapacity(spansNum) - for j := 0; j < spansNum; j++ { + for j := range spansNum { span := ils.Spans().AppendEmpty() span.SetName("root") count := i*spansNum + j @@ -154,14 +154,14 @@ func repeatableString(length int) string { func createLogDataWithCustomLibraries(numResources int, libraries []string, numRecords []int) plog.Logs { logs := plog.NewLogs() logs.ResourceLogs().EnsureCapacity(numResources) - for i := 0; i < numResources; i++ { + for i := range numResources { rl := logs.ResourceLogs().AppendEmpty() rl.ScopeLogs().EnsureCapacity(len(libraries)) - for j := 0; j < len(libraries); j++ { + for j := range libraries { sl := rl.ScopeLogs().AppendEmpty() sl.Scope().SetName(libraries[j]) sl.LogRecords().EnsureCapacity(numRecords[j]) - for k := 0; k < numRecords[j]; k++ { + for k := range numRecords[j] { ts := pcommon.Timestamp(int64(k) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStr("mylog") @@ -806,11 +806,11 @@ func TestReceiveLogs(t *testing.T) { // ensure all events are sent out droppedCount := test.logs.LogRecordCount() - for i := 0; i < 
test.logs.ResourceLogs().Len(); i++ { + for i := range test.logs.ResourceLogs().Len() { rl := test.logs.ResourceLogs().At(i) - for j := 0; j < rl.ScopeLogs().Len(); j++ { + for j := range rl.ScopeLogs().Len() { sl := rl.ScopeLogs().At(j) - for k := 0; k < sl.LogRecords().Len(); k++ { + for k := range sl.LogRecords().Len() { lr := sl.LogRecords().At(k) attrVal, ok := lr.Attributes().Get("otel.log.name") require.True(t, ok) @@ -1764,7 +1764,7 @@ func benchPushLogData(b *testing.B, numResources int, numRecords int, bufSize ui b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { err := exp.ConsumeLogs(context.Background(), logs) require.NoError(b, err) } @@ -1906,7 +1906,7 @@ func benchPushMetricData(b *testing.B, numResources int, numRecords int, bufSize b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { err := exp.ConsumeMetrics(context.Background(), metrics) require.NoError(b, err) } @@ -1927,7 +1927,7 @@ func BenchmarkConsumeLogsRejected(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { err := exp.ConsumeLogs(context.Background(), logs) require.Error(b, err) } diff --git a/exporter/splunkhecexporter/internal/integrationtestutils/splunk.go b/exporter/splunkhecexporter/internal/integrationtestutils/splunk.go index 36199d2abd4a8..89ac3b4baee78 100644 --- a/exporter/splunkhecexporter/internal/integrationtestutils/splunk.go +++ b/exporter/splunkhecexporter/internal/integrationtestutils/splunk.go @@ -30,7 +30,7 @@ func CheckEventsFromSplunk(searchQuery string, startTime string, endTimeOptional // post search jobID := postSearchRequest(user, password, baseURL, searchQuery, startTime, endTime) // wait for search status done == true - for i := 0; i < 20; i++ { // limit loop - not allowing infinite looping + for range 20 { // limit loop - not allowing infinite looping logger.Println("Checking Search Status ...") isDone := checkSearchJobStatusCode(user, password, baseURL, jobID) if isDone == true { diff --git a/exporter/splunkhecexporter/metricdata_to_splunk.go b/exporter/splunkhecexporter/metricdata_to_splunk.go index ec95f98aa4d53..59207b3564b97 100644 --- a/exporter/splunkhecexporter/metricdata_to_splunk.go +++ b/exporter/splunkhecexporter/metricdata_to_splunk.go @@ -86,7 +86,7 @@ func mapMetricToSplunkEvent(res pcommon.Resource, m pmetric.Metric, config *Conf pts := m.Gauge().DataPoints() splunkMetrics := make([]*splunk.Event, pts.Len()) - for gi := 0; gi < pts.Len(); gi++ { + for gi := range pts.Len() { dataPt := pts.At(gi) fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) @@ -103,7 +103,7 @@ func mapMetricToSplunkEvent(res pcommon.Resource, m pmetric.Metric, config *Conf case pmetric.MetricTypeHistogram: pts := m.Histogram().DataPoints() var splunkMetrics []*splunk.Event - for gi := 0; gi < pts.Len(); gi++ { + for gi := range pts.Len() { dataPt := pts.At(gi) bounds := dataPt.ExplicitBounds() counts := dataPt.BucketCounts() @@ -129,7 +129,7 @@ func mapMetricToSplunkEvent(res pcommon.Resource, m pmetric.Metric, config *Conf } value := uint64(0) // now create buckets for each bound. 
- for bi := 0; bi < bounds.Len(); bi++ { + for bi := range bounds.Len() { fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) fields["le"] = float64ToDimValue(bounds.At(bi)) @@ -154,7 +154,7 @@ func mapMetricToSplunkEvent(res pcommon.Resource, m pmetric.Metric, config *Conf case pmetric.MetricTypeSum: pts := m.Sum().DataPoints() splunkMetrics := make([]*splunk.Event, pts.Len()) - for gi := 0; gi < pts.Len(); gi++ { + for gi := range pts.Len() { dataPt := pts.At(gi) fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) @@ -172,7 +172,7 @@ func mapMetricToSplunkEvent(res pcommon.Resource, m pmetric.Metric, config *Conf case pmetric.MetricTypeSummary: pts := m.Summary().DataPoints() var splunkMetrics []*splunk.Event - for gi := 0; gi < pts.Len(); gi++ { + for gi := range pts.Len() { dataPt := pts.At(gi) // first, add one event for sum, and one for count if !math.IsNaN(dataPt.Sum()) { @@ -193,7 +193,7 @@ func mapMetricToSplunkEvent(res pcommon.Resource, m pmetric.Metric, config *Conf } // now create values for each quantile. - for bi := 0; bi < dataPt.QuantileValues().Len(); bi++ { + for bi := range dataPt.QuantileValues().Len() { fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) dp := dataPt.QuantileValues().At(bi) diff --git a/exporter/splunkhecexporter/tracedata_to_splunk.go b/exporter/splunkhecexporter/tracedata_to_splunk.go index a00219b928e42..16836ef070b42 100644 --- a/exporter/splunkhecexporter/tracedata_to_splunk.go +++ b/exporter/splunkhecexporter/tracedata_to_splunk.go @@ -93,7 +93,7 @@ func toHecSpan(span ptrace.Span) hecSpan { attributes := span.Attributes().AsRaw() links := make([]hecLink, span.Links().Len()) - for i := 0; i < span.Links().Len(); i++ { + for i := range span.Links().Len() { link := span.Links().At(i) linkAttributes := link.Attributes().AsRaw() links[i] = hecLink{ @@ -104,7 +104,7 @@ func toHecSpan(span ptrace.Span) hecSpan { } } events := make([]hecEvent, span.Events().Len()) - for i := 0; i < span.Events().Len(); i++ { + for i := range span.Events().Len() { event := span.Events().At(i) eventAttributes := event.Attributes().AsRaw() events[i] = hecEvent{ diff --git a/exporter/stefexporter/exporter_test.go b/exporter/stefexporter/exporter_test.go index f9ede715bdf0d..faa3eef645d20 100644 --- a/exporter/stefexporter/exporter_test.go +++ b/exporter/stefexporter/exporter_test.go @@ -172,7 +172,7 @@ func TestExport(t *testing.T) { // Send some metrics. Make sure the count of batches exceeds the number of consumers // so that we can hit the case where exporter begins to forcedly flush encoded data. 
pointCount := int64(0) - for i := 0; i < 2*cfg.QueueConfig.NumConsumers; i++ { + for range 2 * cfg.QueueConfig.NumConsumers { md := testdata.GenerateMetrics(1) pointCount += int64(md.DataPointCount()) err := exp.ConsumeMetrics(context.Background(), md) diff --git a/exporter/sumologicexporter/exporter.go b/exporter/sumologicexporter/exporter.go index 14060843d9230..958044a758513 100644 --- a/exporter/sumologicexporter/exporter.go +++ b/exporter/sumologicexporter/exporter.go @@ -320,7 +320,7 @@ func (se *sumologicexporter) pushLogsData(ctx context.Context, ld plog.Logs) err // Iterate over ResourceLogs rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) currentMetadata := newFields(rl.Resource().Attributes()) @@ -344,7 +344,7 @@ func (se *sumologicexporter) pushLogsData(ctx context.Context, ld plog.Logs) err rls := ld.ResourceLogs().AppendEmpty() dropped[i].resource.CopyTo(rls.Resource()) - for j := 0; j < len(dropped[i].records); j++ { + for j := range len(dropped[i].records) { dropped[i].records[j].CopyTo( rls.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty(), ) diff --git a/exporter/sumologicexporter/exporter_test.go b/exporter/sumologicexporter/exporter_test.go index e042b7db6f9ab..f20e2f43c77f6 100644 --- a/exporter/sumologicexporter/exporter_test.go +++ b/exporter/sumologicexporter/exporter_test.go @@ -516,9 +516,9 @@ func Benchmark_ExporterPushLogs(b *testing.B) { }() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { wg := sync.WaitGroup{} - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { logs := logRecordsToLogs(exampleNLogs(128)) diff --git a/exporter/sumologicexporter/fields_test.go b/exporter/sumologicexporter/fields_test.go index 1acb084c16dba..ec4afdd89bf73 100644 --- a/exporter/sumologicexporter/fields_test.go +++ b/exporter/sumologicexporter/fields_test.go @@ -82,7 +82,7 @@ func BenchmarkFields(b *testing.B) { sut := newFields(attrMap) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { _ = sut.string() } } diff --git a/exporter/sumologicexporter/otlp.go b/exporter/sumologicexporter/otlp.go index a243011f95a90..5b304eb8b112b 100644 --- a/exporter/sumologicexporter/otlp.go +++ b/exporter/sumologicexporter/otlp.go @@ -16,11 +16,11 @@ func decomposeHistograms(md pmetric.Metrics) pmetric.Metrics { // short circuit and do nothing if no Histograms are present foundHistogram := false outer: - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { resourceMetric := md.ResourceMetrics().At(i) - for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ { + for j := range resourceMetric.ScopeMetrics().Len() { scopeMetric := resourceMetric.ScopeMetrics().At(j) - for k := 0; k < scopeMetric.Metrics().Len(); k++ { + for k := range scopeMetric.Metrics().Len() { foundHistogram = scopeMetric.Metrics().At(k).Type() == pmetric.MetricTypeHistogram if foundHistogram { break outer @@ -35,11 +35,11 @@ outer: decomposed := pmetric.NewMetrics() md.CopyTo(decomposed) - for i := 0; i < decomposed.ResourceMetrics().Len(); i++ { + for i := range decomposed.ResourceMetrics().Len() { resourceMetric := decomposed.ResourceMetrics().At(i) - for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ { + for j := range resourceMetric.ScopeMetrics().Len() { metrics := resourceMetric.ScopeMetrics().At(j).Metrics() - for k := 0; k < metrics.Len(); k++ { + for k := range metrics.Len() { metric := metrics.At(k) if metric.Type() == pmetric.MetricTypeHistogram { decomposedHistogram := 
decomposeHistogram(metric) @@ -78,12 +78,12 @@ func getHistogramBucketsMetric(metric pmetric.Metric) pmetric.Metric { bucketsMetric.SetEmptyGauge() bucketsDatapoints := bucketsMetric.Gauge().DataPoints() - for i := 0; i < histogram.DataPoints().Len(); i++ { + for i := range histogram.DataPoints().Len() { histogramDataPoint := histogram.DataPoints().At(i) histogramBounds := histogramDataPoint.ExplicitBounds() var cumulative uint64 - for j := 0; j < histogramBounds.Len(); j++ { + for j := range histogramBounds.Len() { bucketDataPoint := bucketsDatapoints.AppendEmpty() bound := histogramBounds.At(j) histogramDataPoint.Attributes().CopyTo(bucketDataPoint.Attributes()) @@ -116,7 +116,7 @@ func getHistogramSumMetric(metric pmetric.Metric) pmetric.Metric { sumMetric.SetEmptyGauge() sumDataPoints := sumMetric.Gauge().DataPoints() - for i := 0; i < histogram.DataPoints().Len(); i++ { + for i := range histogram.DataPoints().Len() { histogramDataPoint := histogram.DataPoints().At(i) sumDataPoint := sumDataPoints.AppendEmpty() histogramDataPoint.Attributes().CopyTo(sumDataPoint.Attributes()) @@ -137,7 +137,7 @@ func getHistogramCountMetric(metric pmetric.Metric) pmetric.Metric { countMetric.SetEmptyGauge() countDataPoints := countMetric.Gauge().DataPoints() - for i := 0; i < histogram.DataPoints().Len(); i++ { + for i := range histogram.DataPoints().Len() { histogramDataPoint := histogram.DataPoints().At(i) countDataPoint := countDataPoints.AppendEmpty() histogramDataPoint.Attributes().CopyTo(countDataPoint.Attributes()) diff --git a/exporter/sumologicexporter/prometheus_formatter.go b/exporter/sumologicexporter/prometheus_formatter.go index 24d64ba279f77..aac9b006d0b9c 100644 --- a/exporter/sumologicexporter/prometheus_formatter.go +++ b/exporter/sumologicexporter/prometheus_formatter.go @@ -253,7 +253,7 @@ func (f *prometheusFormatter) gauge2Strings(metric pmetric.Metric, attributes pc dps := metric.Gauge().DataPoints() lines := make([]string, 0, dps.Len()) - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) line := f.numberDataPointValueLine( metric.Name(), @@ -271,7 +271,7 @@ func (f *prometheusFormatter) sum2Strings(metric pmetric.Metric, attributes pcom dps := metric.Sum().DataPoints() lines := make([]string, 0, dps.Len()) - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) line := f.numberDataPointValueLine( metric.Name(), @@ -290,11 +290,11 @@ func (f *prometheusFormatter) summary2Strings(metric pmetric.Metric, attributes dps := metric.Summary().DataPoints() var lines []string - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) qs := dp.QuantileValues() additionalAttributes := pcommon.NewMap() - for i := 0; i < qs.Len(); i++ { + for i := range qs.Len() { q := qs.At(i) additionalAttributes.PutDouble(prometheusQuantileTag, q.Quantile()) @@ -332,7 +332,7 @@ func (f *prometheusFormatter) histogram2Strings(metric pmetric.Metric, attribute dps := metric.Histogram().DataPoints() var lines []string - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) explicitBounds := dp.ExplicitBounds() @@ -340,7 +340,7 @@ func (f *prometheusFormatter) histogram2Strings(metric pmetric.Metric, attribute var cumulative uint64 additionalAttributes := pcommon.NewMap() - for i := 0; i < explicitBounds.Len(); i++ { + for i := range explicitBounds.Len() { bound := explicitBounds.At(i) cumulative += dp.BucketCounts().At(i) additionalAttributes.PutDouble(prometheusLeTag, bound) diff --git 
a/exporter/sumologicexporter/prometheus_formatter_test.go b/exporter/sumologicexporter/prometheus_formatter_test.go index dbb593df5e625..ab1ed87436100 100644 --- a/exporter/sumologicexporter/prometheus_formatter_test.go +++ b/exporter/sumologicexporter/prometheus_formatter_test.go @@ -196,7 +196,7 @@ func Benchmark_PrometheusFormatter_Metric2String(b *testing.B) { metric, attributes := buildExampleHistogramMetric(true) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { _ = f.metric2String(metric, attributes) } } diff --git a/exporter/sumologicexporter/sender.go b/exporter/sumologicexporter/sender.go index 6498e8fc8f844..2359a3708751c 100644 --- a/exporter/sumologicexporter/sender.go +++ b/exporter/sumologicexporter/sender.go @@ -394,9 +394,9 @@ func (s *sender) sendNonOTLPLogs(ctx context.Context, rl plog.ResourceLogs, flds ) slgs := rl.ScopeLogs() - for i := 0; i < slgs.Len(); i++ { + for i := range slgs.Len() { slg := slgs.At(i) - for j := 0; j < slg.LogRecords().Len(); j++ { + for j := range slg.LogRecords().Len() { lr := slg.LogRecords().At(j) formattedLine, err := s.formatLogLine(lr) if err != nil { @@ -471,7 +471,7 @@ func (s *sender) sendNonOTLPMetrics(ctx context.Context, md pmetric.Metrics) (pm rms := md.ResourceMetrics() droppedMetrics := pmetric.NewMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { rm := rms.At(i) flds = newFields(rm.Resource().Attributes()) sms := rm.ScopeMetrics() @@ -498,10 +498,10 @@ func (s *sender) sendNonOTLPMetrics(ctx context.Context, md pmetric.Metrics) (pm // transform the metrics into formatted lines ready to be sent var formattedLines []string var err error - for i := 0; i < sms.Len(); i++ { + for i := range sms.Len() { sm := sms.At(i) - for j := 0; j < sm.Metrics().Len(); j++ { + for j := range sm.Metrics().Len() { m := sm.Metrics().At(j) var formattedLine string diff --git a/exporter/sumologicexporter/sender_test.go b/exporter/sumologicexporter/sender_test.go index 6b2ce1820e19e..a4ba8a4fb35f8 100644 --- a/exporter/sumologicexporter/sender_test.go +++ b/exporter/sumologicexporter/sender_test.go @@ -146,7 +146,7 @@ func exampleTwoLogs() []plog.LogRecord { func exampleNLogs(n int) []plog.LogRecord { buffer := make([]plog.LogRecord, n) - for i := 0; i < n; i++ { + for i := range n { buffer[i] = plog.NewLogRecord() buffer[i].Body().SetStr("Example log") } @@ -833,7 +833,7 @@ func TestSendLogsOTLP(t *testing.T) { ls := l.ResourceLogs().AppendEmpty() logRecords := exampleTwoLogs() - for i := 0; i < len(logRecords); i++ { + for i := range logRecords { logRecords[i].MoveTo(ls.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()) } diff --git a/exporter/syslogexporter/exporter.go b/exporter/syslogexporter/exporter.go index 99d69a9b01b3f..15923f08e2804 100644 --- a/exporter/syslogexporter/exporter.go +++ b/exporter/syslogexporter/exporter.go @@ -86,11 +86,11 @@ func (se *syslogexporter) pushLogsData(ctx context.Context, logs plog.Logs) erro func (se *syslogexporter) exportBatch(ctx context.Context, logs plog.Logs) error { var payload strings.Builder - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { resourceLogs := logs.ResourceLogs().At(i) - for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ { + for j := range resourceLogs.ScopeLogs().Len() { scopeLogs := resourceLogs.ScopeLogs().At(j) - for k := 0; k < scopeLogs.LogRecords().Len(); k++ { + for k := range scopeLogs.LogRecords().Len() { logRecord := scopeLogs.LogRecords().At(k) formatted := se.formatter.format(logRecord) 
payload.WriteString(formatted) @@ -121,13 +121,13 @@ func (se *syslogexporter) exportNonBatch(ctx context.Context, logs plog.Logs) er errs := []error{} droppedLogs := plog.NewLogs() - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { resourceLogs := logs.ResourceLogs().At(i) droppedResourceLogs := droppedLogs.ResourceLogs().AppendEmpty() - for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ { + for j := range resourceLogs.ScopeLogs().Len() { scopeLogs := resourceLogs.ScopeLogs().At(j) droppedScopeLogs := droppedResourceLogs.ScopeLogs().AppendEmpty() - for k := 0; k < scopeLogs.LogRecords().Len(); k++ { + for k := range scopeLogs.LogRecords().Len() { logRecord := scopeLogs.LogRecords().At(k) formatted := se.formatter.format(logRecord) err = sender.Write(ctx, formatted) diff --git a/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go b/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go index a5dd952a2cbdf..d3ce34e275f1b 100644 --- a/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go +++ b/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go @@ -23,7 +23,7 @@ func createSimpleLogData(numberOfLogs int) plog.Logs { rl.ScopeLogs().AppendEmpty() // Add an empty ScopeLogs sl := rl.ScopeLogs().AppendEmpty() - for i := 0; i < numberOfLogs; i++ { + for i := range numberOfLogs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStr("mylog") diff --git a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice.go b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice.go index 4d16b95a9a3d3..9e30b34431e81 100644 --- a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice.go +++ b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice.go @@ -39,16 +39,16 @@ func convertLogs(ld plog.Logs) []*cls.Log { clsLogs := make([]*cls.Log, 0, ld.LogRecordCount()) rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) ills := rl.ScopeLogs() resource := rl.Resource() resourceContents := resourceToLogContents(resource) - for j := 0; j < ills.Len(); j++ { + for j := range ills.Len() { ils := ills.At(j) instrumentationLibraryContents := instrumentationLibraryToLogContents(ils.Scope()) logs := ils.LogRecords() - for j := 0; j < logs.Len(); j++ { + for j := range logs.Len() { clsLog := mapLogRecordToLogService(logs.At(j), resourceContents, instrumentationLibraryContents) if clsLog != nil { clsLogs = append(clsLogs, clsLog) diff --git a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go index aa66674d5c211..c961d3bc0034f 100644 --- a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go @@ -50,7 +50,7 @@ func createLogData(numberOfLogs int) plog.Logs { sl.Scope().SetName("collector") sl.Scope().SetVersion("v0.1.0") - for i := 0; i < numberOfLogs; i++ { + for i := range numberOfLogs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() switch i { @@ -107,7 +107,7 @@ func TestConvertLogs(t *testing.T) { wantLogs := make([][]logKeyValuePair, 0, validLogCount) resultLogFile := "./testdata/logservice_log_data.json" require.NoError(t, loadFromJSON(resultLogFile, &wantLogs)) - for j := 0; j < validLogCount; j++ { + for j := range validLogCount { 
sort.Sort(logKeyValuePairs(gotLogPairs[j])) sort.Sort(logKeyValuePairs(wantLogs[j])) assert.Equal(t, wantLogs[j], gotLogPairs[j]) diff --git a/extension/ackextension/inmemory_test.go b/extension/ackextension/inmemory_test.go index da5bb59733d82..a7860e2e38f6f 100644 --- a/extension/ackextension/inmemory_test.go +++ b/extension/ackextension/inmemory_test.go @@ -20,13 +20,13 @@ func TestAckPartitionNextAckConcurrency(t *testing.T) { wg := sync.WaitGroup{} wg.Add(2) go func() { - for i := 0; i < ackSize/2; i++ { + for range ackSize / 2 { map1[ap.nextAck()] = struct{}{} } wg.Done() }() go func() { - for i := 0; i < ackSize/2; i++ { + for range ackSize / 2 { map2[ap.nextAck()] = struct{}{} } wg.Done() @@ -55,7 +55,7 @@ func TestExtensionAck_ProcessEvents_Concurrency(t *testing.T) { // send events through different partitions go func() { - for i := 0; i < 100; i++ { + for range 100 { // each partition has 3 events map1[ext.ProcessEvent(fmt.Sprint(partitionName))] = struct{}{} } @@ -63,7 +63,7 @@ func TestExtensionAck_ProcessEvents_Concurrency(t *testing.T) { }() go func() { - for i := 0; i < 100; i++ { + for range 100 { // each partition has 3 events map2[ext.ProcessEvent(fmt.Sprint(partitionName))] = struct{}{} } @@ -71,7 +71,7 @@ func TestExtensionAck_ProcessEvents_Concurrency(t *testing.T) { }() go func() { - for i := 0; i < 100; i++ { + for range 100 { // each partition has 3 events map3[ext.ProcessEvent(fmt.Sprint(partitionName))] = struct{}{} } @@ -94,15 +94,15 @@ func TestExtensionAck_ProcessEvents_EventsUnAcked(t *testing.T) { ext := newInMemoryAckExtension(&conf) // send events through different partitions - for i := 0; i < 100; i++ { + for i := range 100 { // each partition has 3 events - for j := 0; j < 3; j++ { + for range 3 { ext.ProcessEvent(fmt.Sprintf("part-%d", i)) } } // non-acked events should be return false - for i := 0; i < 100; i++ { + for i := range 100 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{0, 1, 2}) require.Len(t, result, 3) require.False(t, result[0]) @@ -119,15 +119,15 @@ func TestExtensionAck_ProcessEvents_EventsAcked(t *testing.T) { ext := newInMemoryAckExtension(&conf) // send events through different partitions - for i := 0; i < 100; i++ { + for i := range 100 { // each partition has 3 events - for j := 0; j < 3; j++ { + for range 3 { ext.ProcessEvent(fmt.Sprintf("part-%d", i)) } } // ack the second event of all even partitions and first and third events of all odd partitions - for i := 0; i < 100; i++ { + for i := range 100 { if i%2 == 0 { ext.Ack(fmt.Sprintf("part-%d", i), 2) } else { @@ -137,7 +137,7 @@ func TestExtensionAck_ProcessEvents_EventsAcked(t *testing.T) { } // second event of even partitions should be acked, and first and third events of odd partitions should be acked - for i := 0; i < 100; i++ { + for i := range 100 { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Len(t, result, 3) @@ -162,15 +162,15 @@ func TestExtensionAck_QueryAcks_Unidempotent(t *testing.T) { ext := newInMemoryAckExtension(&conf) // send events through different partitions - for i := 0; i < 100; i++ { + for i := range 100 { // each partition has 3 events - for j := 0; j < 3; j++ { + for range 3 { ext.ProcessEvent(fmt.Sprintf("part-%d", i)) } } // ack the second event of all even partitions and first and third events of all odd partitions - for i := 0; i < 100; i++ { + for i := range 100 { if i%2 == 0 { ext.Ack(fmt.Sprintf("part-%d", i), 2) } else { @@ -180,7 +180,7 @@ func 
TestExtensionAck_QueryAcks_Unidempotent(t *testing.T) { } // second event of even partitions should be acked, and first and third events of odd partitions should be acked - for i := 0; i < 100; i++ { + for i := range 100 { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Len(t, result, 3) @@ -197,7 +197,7 @@ func TestExtensionAck_QueryAcks_Unidempotent(t *testing.T) { } // querying the same acked events should result in false - for i := 0; i < 100; i++ { + for i := range 100 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Len(t, result, 3) require.False(t, result[1]) @@ -217,10 +217,10 @@ func TestExtensionAckAsync(t *testing.T) { var wg sync.WaitGroup wg.Add(partitionCount) // send events through different partitions - for i := 0; i < partitionCount; i++ { + for i := range partitionCount { go func() { // each partition has 3 events - for j := 0; j < 3; j++ { + for range 3 { ext.ProcessEvent(fmt.Sprintf("part-%d", i)) } wg.Done() @@ -230,7 +230,7 @@ func TestExtensionAckAsync(t *testing.T) { wg.Wait() // non-acked events should be return false - for i := 0; i < partitionCount; i++ { + for i := range partitionCount { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Len(t, result, 3) require.False(t, result[1]) @@ -240,7 +240,7 @@ func TestExtensionAckAsync(t *testing.T) { wg.Add(partitionCount) // ack the second event of all even partitions and first and third events of all odd partitions - for i := 0; i < partitionCount; i++ { + for i := range partitionCount { go func() { if i%2 == 0 { ext.Ack(fmt.Sprintf("part-%d", i), 2) @@ -254,7 +254,7 @@ func TestExtensionAckAsync(t *testing.T) { wg.Wait() // second event of even partitions should be acked, and first and third events of odd partitions should be acked - for i := 0; i < partitionCount; i++ { + for i := range partitionCount { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Len(t, result, 3) @@ -272,7 +272,7 @@ func TestExtensionAckAsync(t *testing.T) { wg.Add(100) resultChan := make(chan map[uint64]bool, partitionCount) // querying the same acked events should result in false - for i := 0; i < partitionCount; i++ { + for i := range partitionCount { go func() { resultChan <- ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) wg.Done() @@ -280,7 +280,7 @@ func TestExtensionAckAsync(t *testing.T) { } wg.Wait() - for i := 0; i < partitionCount; i++ { + for range partitionCount { result := <-resultChan require.Len(t, result, 3) require.False(t, result[1]) diff --git a/extension/encoding/jsonlogencodingextension/extension.go b/extension/encoding/jsonlogencodingextension/extension.go index 18575859ce7fc..b186c557c0bc8 100644 --- a/extension/encoding/jsonlogencodingextension/extension.go +++ b/extension/encoding/jsonlogencodingextension/extension.go @@ -31,13 +31,13 @@ func (e *jsonLogExtension) MarshalLogs(ld plog.Logs) ([]byte, error) { logs := make([]map[string]any, 0, ld.LogRecordCount()) rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) sls := rl.ScopeLogs() - for j := 0; j < sls.Len(); j++ { + for j := range sls.Len() { sl := sls.At(j) logSlice := sl.LogRecords() - for k := 0; k < logSlice.Len(); k++ { + for k := range logSlice.Len() { log := logSlice.At(k) switch log.Body().Type() { case pcommon.ValueTypeMap: @@ -82,15 +82,15 @@ func (e *jsonLogExtension) logProcessor(ld plog.Logs) ([]byte, error) { logs := make([]logBody, 0, 
ld.LogRecordCount()) rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rl := rls.At(i) resourceAttrs := rl.Resource().Attributes().AsRaw() sls := rl.ScopeLogs() - for j := 0; j < sls.Len(); j++ { + for j := range sls.Len() { sl := sls.At(j) logSlice := sl.LogRecords() - for k := 0; k < logSlice.Len(); k++ { + for k := range logSlice.Len() { log := logSlice.At(k) logEvent := logBody{ Body: log.Body().AsRaw(), diff --git a/extension/encoding/otlpencodingextension/extension_test.go b/extension/encoding/otlpencodingextension/extension_test.go index 77bac75347a70..4553d76d373b8 100644 --- a/extension/encoding/otlpencodingextension/extension_test.go +++ b/extension/encoding/otlpencodingextension/extension_test.go @@ -152,7 +152,7 @@ func generateTraces() ptrace.Traces { md := ptrace.NewTraces() ilm := md.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty() ilm.Spans().EnsureCapacity(num) - for i := 0; i < num; i++ { + for range num { im := ilm.Spans().AppendEmpty() im.SetName("test_name") im.SetStartTimestamp(pcommon.NewTimestampFromTime(now)) @@ -166,7 +166,7 @@ func generateLogs() plog.Logs { md := plog.NewLogs() ilm := md.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty() ilm.LogRecords().EnsureCapacity(num) - for i := 0; i < num; i++ { + for range num { im := ilm.LogRecords().AppendEmpty() im.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) } @@ -182,7 +182,7 @@ func generateMetrics() pmetric.Metrics { md := pmetric.NewMetrics() ilm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() ilm.Metrics().EnsureCapacity(num) - for i := 0; i < num; i++ { + for range num { im := ilm.Metrics().AppendEmpty() im.SetName("test_name") idp := im.SetEmptySum().DataPoints().AppendEmpty() @@ -199,7 +199,7 @@ func generateProfiles() pprofile.Profiles { pd := pprofile.NewProfiles() ilm := pd.ResourceProfiles().AppendEmpty().ScopeProfiles().AppendEmpty() ilm.Profiles().EnsureCapacity(num) - for i := 0; i < num; i++ { + for range num { im := ilm.Profiles().AppendEmpty() im.SetProfileID([16]byte{0x01, 0x02, 0x03, 0x04}) im.SetStartTime(pcommon.NewTimestampFromTime(now)) diff --git a/extension/encoding/textencodingextension/text.go b/extension/encoding/textencodingextension/text.go index 81392f1091f50..bfe8f5ab5c6d9 100644 --- a/extension/encoding/textencodingextension/text.go +++ b/extension/encoding/textencodingextension/text.go @@ -63,11 +63,11 @@ func (r *textLogCodec) UnmarshalLogs(buf []byte) (plog.Logs, error) { func (r *textLogCodec) MarshalLogs(ld plog.Logs) ([]byte, error) { var b []byte - for i := 0; i < ld.ResourceLogs().Len(); i++ { + for i := range ld.ResourceLogs().Len() { rl := ld.ResourceLogs().At(i) - for j := 0; j < rl.ScopeLogs().Len(); j++ { + for j := range rl.ScopeLogs().Len() { sl := rl.ScopeLogs().At(j) - for k := 0; k < sl.LogRecords().Len(); k++ { + for k := range sl.LogRecords().Len() { lr := sl.LogRecords().At(k) b = append(b, []byte(lr.Body().AsString())...) b = append(b, []byte(r.marshalingSeparator)...) 
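A second Go 1.22 behavior worth noting, since several concurrent tests in this patch capture the loop variable inside go func() (for example TestExtensionAckAsync above and Test_serviceStrategyCache_Concurrency below): the range loop variable is freshly declared on each iteration, so every goroutine sees its own value rather than a shared final one. A standalone sketch with hypothetical names, not part of the diff:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Each goroutine captures its own per-iteration i: 0, 1, 2.
			fmt.Println("partition", i)
		}()
	}
	wg.Wait()
}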
diff --git a/extension/jaegerremotesampling/extension_test.go b/extension/jaegerremotesampling/extension_test.go index deecbc6955691..618896db2bc73 100644 --- a/extension/jaegerremotesampling/extension_test.go +++ b/extension/jaegerremotesampling/extension_test.go @@ -117,7 +117,7 @@ func TestRemote(t *testing.T) { assert.NoError(t, e.Start(context.Background(), componenttest.NewNopHost())) // make test case defined number of calls - for i := 0; i < tc.performedClientCallCount; i++ { + for range tc.performedClientCallCount { resp, err := http.Get("http://127.0.0.1:5778/sampling?service=foo") assert.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) diff --git a/extension/jaegerremotesampling/internal/source/filesource/filesource_test.go b/extension/jaegerremotesampling/internal/source/filesource/filesource_test.go index 29a9cfc6e87fd..363b17a162b23 100644 --- a/extension/jaegerremotesampling/internal/source/filesource/filesource_test.go +++ b/extension/jaegerremotesampling/internal/source/filesource/filesource_test.go @@ -410,7 +410,7 @@ func TestAutoUpdateStrategyWithFile(t *testing.T) { require.NoError(t, os.WriteFile(dstFile, []byte(newStr), 0o600)) // wait for reload timer - for i := 0; i < 1000; i++ { // wait up to 1sec + for range 1000 { // wait up to 1sec s, err = provider.GetSamplingStrategy(context.Background(), "foo") require.NoError(t, err) if s.ProbabilisticSampling != nil && s.ProbabilisticSampling.SamplingRate == 0.9 { @@ -450,7 +450,7 @@ func TestAutoUpdateStrategyWithURL(t *testing.T) { } // wait for reload timer - for i := 0; i < 1000; i++ { // wait up to 1sec + for range 1000 { // wait up to 1sec s, err = provider.GetSamplingStrategy(context.Background(), "foo") require.NoError(t, err) if s.ProbabilisticSampling != nil && s.ProbabilisticSampling.SamplingRate == 0.9 { diff --git a/extension/jaegerremotesampling/internal/source/remotesource/remote_strategy_cache_test.go b/extension/jaegerremotesampling/internal/source/remotesource/remote_strategy_cache_test.go index e242466c04f5a..f995a2cba682f 100644 --- a/extension/jaegerremotesampling/internal/source/remotesource/remote_strategy_cache_test.go +++ b/extension/jaegerremotesampling/internal/source/remotesource/remote_strategy_cache_test.go @@ -246,9 +246,9 @@ func Test_serviceStrategyCache_Concurrency(t *testing.T) { wg := sync.WaitGroup{} wg.Add(numThreads) - for i := 0; i < numThreads; i++ { + for i := range numThreads { go func() { - for j := 0; j < numIterationsPerThread; j++ { + for range numIterationsPerThread { for _, svcName := range []string{ fmt.Sprintf("thread-specific-service-%d", i), "contended-for-service", diff --git a/extension/observer/ecsobserver/internal/ecsmock/service.go b/extension/observer/ecsobserver/internal/ecsmock/service.go index 3c76aeab08253..65100cda4432a 100644 --- a/extension/observer/ecsobserver/internal/ecsmock/service.go +++ b/extension/observer/ecsobserver/internal/ecsmock/service.go @@ -304,7 +304,7 @@ func (c *Cluster) SetServices(services []*ecs.Service) { // GenTasks returns tasks with TaskArn set to arnPrefix+offset, where offset is [0, count). func GenTasks(arnPrefix string, count int, modifier func(i int, task *ecs.Task)) []*ecs.Task { var tasks []*ecs.Task - for i := 0; i < count; i++ { + for i := range count { t := &ecs.Task{ TaskArn: aws.String(arnPrefix + strconv.Itoa(i)), } @@ -320,7 +320,7 @@ func GenTasks(arnPrefix string, count int, modifier func(i int, task *ecs.Task)) // e.g. foo0:1, foo1:1 the `:` is following the task family version syntax. 
func GenTaskDefinitions(arnPrefix string, count int, version int, modifier func(i int, def *ecs.TaskDefinition)) []*ecs.TaskDefinition { var defs []*ecs.TaskDefinition - for i := 0; i < count; i++ { + for i := range count { d := &ecs.TaskDefinition{ TaskDefinitionArn: aws.String(fmt.Sprintf("%s%d:%d", arnPrefix, i, version)), } @@ -334,7 +334,7 @@ func GenTaskDefinitions(arnPrefix string, count int, version int, modifier func( func GenContainerInstances(arnPrefix string, count int, modifier func(i int, ci *ecs.ContainerInstance)) []*ecs.ContainerInstance { var instances []*ecs.ContainerInstance - for i := 0; i < count; i++ { + for i := range count { ci := &ecs.ContainerInstance{ ContainerInstanceArn: aws.String(fmt.Sprintf("%s%d", arnPrefix, i)), } @@ -348,7 +348,7 @@ func GenContainerInstances(arnPrefix string, count int, modifier func(i int, ci func GenEc2Instances(idPrefix string, count int, modifier func(i int, ins *ec2.Instance)) []*ec2.Instance { var instances []*ec2.Instance - for i := 0; i < count; i++ { + for i := range count { ins := &ec2.Instance{ InstanceId: aws.String(fmt.Sprintf("%s%d", idPrefix, i)), } @@ -362,7 +362,7 @@ func GenEc2Instances(idPrefix string, count int, modifier func(i int, ins *ec2.I func GenServices(arnPrefix string, count int, modifier func(i int, s *ecs.Service)) []*ecs.Service { var services []*ecs.Service - for i := 0; i < count; i++ { + for i := range count { svc := &ecs.Service{ ServiceArn: aws.String(fmt.Sprintf("%s%d", arnPrefix, i)), ServiceName: aws.String(fmt.Sprintf("%s%d", arnPrefix, i)), @@ -457,7 +457,7 @@ func getPage(p pageInput) (*pageOutput, error) { func getArns(items any, arnGetter func(i int) *string) []*string { rv := reflect.ValueOf(items) var arns []*string - for i := 0; i < rv.Len(); i++ { + for i := range rv.Len() { arns = append(arns, arnGetter(i)) } return arns diff --git a/extension/observer/ecsobserver/internal/ecsmock/service_test.go b/extension/observer/ecsobserver/internal/ecsmock/service_test.go index 8105596b008e0..b894a8eb411f4 100644 --- a/extension/observer/ecsobserver/internal/ecsmock/service_test.go +++ b/extension/observer/ecsobserver/internal/ecsmock/service_test.go @@ -151,7 +151,7 @@ func TestCluster_DescribeInstancesWithContext(t *testing.T) { t.Run("get by id", func(t *testing.T) { var ids []*string nIDs := 100 - for i := 0; i < nIDs; i++ { + for i := range nIDs { ids = append(ids, aws.String(fmt.Sprintf("i-%d", i*10))) } req := &ec2.DescribeInstancesInput{InstanceIds: ids} @@ -191,7 +191,7 @@ func TestCluster_DescribeContainerInstancesWithContext(t *testing.T) { t.Run("get by id", func(t *testing.T) { var ids []*string nIDs := count - for i := 0; i < nIDs; i++ { + for i := range nIDs { ids = append(ids, aws.String(fmt.Sprintf("foo%d", i))) } req := &ecs.DescribeContainerInstancesInput{ContainerInstances: ids} diff --git a/extension/storage/dbstorage/extension_test.go b/extension/storage/dbstorage/extension_test.go index e7f775755016b..92a7b23aca7f2 100644 --- a/extension/storage/dbstorage/extension_test.go +++ b/extension/storage/dbstorage/extension_test.go @@ -78,28 +78,28 @@ func testExtensionIntegrity(t *testing.T, se storage.Extension) { myBytes := []byte(n.Name()) // Set my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Set(ctx, keys[i], myBytes) require.NoError(t, err) } // Repeatedly thrash client - for j := 0; j < 100; j++ { + for range 100 { // Make sure my values are still mine - for i := 0; i < len(keys); i++ { + for i := range keys { v, err := c.Get(ctx, keys[i]) 
require.NoError(t, err) require.Equal(t, myBytes, v) } // Delete my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Delete(ctx, keys[i]) require.NoError(t, err) } // Reset my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Set(ctx, keys[i], myBytes) require.NoError(t, err) } diff --git a/extension/storage/filestorage/client_test.go b/extension/storage/filestorage/client_test.go index d1d1b62138110..b20a8ebaebe05 100644 --- a/extension/storage/filestorage/client_test.go +++ b/extension/storage/filestorage/client_test.go @@ -293,7 +293,7 @@ func TestClientReboundCompaction(t *testing.T) { ) // 2. Remove the large entries - for i := 0; i < int(numEntries); i++ { + for i := range numEntries { _, realSize, err := client.getDbSize() require.NoError(t, err) if realSize < testCase.drainStorageBelowMiB*oneMiB { @@ -368,7 +368,7 @@ func TestClientConcurrentCompaction(t *testing.T) { // Start a couple of concurrent threads and see how they add/remove data as needed without failures clientOperationsThread := func(t *testing.T, id int) { repeats := 10 - for i := 0; i < repeats; i++ { + for i := range repeats { batchWrite := []*storage.Operation{ storage.SetOperation(fmt.Sprintf("foo-%d-%d", id, i), make([]byte, 1000)), storage.SetOperation(fmt.Sprintf("bar-%d-%d", id, i), []byte("testValueBar")), @@ -392,7 +392,7 @@ func TestClientConcurrentCompaction(t *testing.T) { } } - for i := 0; i < 10; i++ { + for i := range 10 { t.Run(fmt.Sprintf("client-operations-thread-%d", i), func(t *testing.T) { t.Parallel() clientOperationsThread(t, i) @@ -414,7 +414,7 @@ func BenchmarkClientGet(b *testing.B) { testKey := "testKey" b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { _, err = client.Get(ctx, testKey) require.NoError(b, err) } @@ -433,12 +433,12 @@ func BenchmarkClientGet100(b *testing.B) { ctx := context.Background() testEntries := make([]*storage.Operation, 100) - for i := 0; i < 100; i++ { + for i := range 100 { testEntries[i] = storage.GetOperation(fmt.Sprintf("testKey-%d", i)) } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { require.NoError(b, client.Batch(ctx, testEntries...)) } } @@ -458,7 +458,7 @@ func BenchmarkClientSet(b *testing.B) { testValue := []byte("testValue") b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { require.NoError(b, client.Set(ctx, testKey, testValue)) } } @@ -475,12 +475,12 @@ func BenchmarkClientSet100(b *testing.B) { ctx := context.Background() testEntries := make([]*storage.Operation, 100) - for i := 0; i < 100; i++ { + for i := range 100 { testEntries[i] = storage.SetOperation(fmt.Sprintf("testKey-%d", i), []byte("testValue")) } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { require.NoError(b, client.Batch(ctx, testEntries...)) } } @@ -499,7 +499,7 @@ func BenchmarkClientDelete(b *testing.B) { testKey := "testKey" b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { require.NoError(b, client.Delete(ctx, testKey)) } } @@ -523,12 +523,12 @@ func BenchmarkClientSetLargeDB(b *testing.B) { ctx := context.Background() - for n := 0; n < entryCount; n++ { + for n := range entryCount { testKey = fmt.Sprintf("testKey-%d", n) require.NoError(b, client.Set(ctx, testKey, entry)) } - for n := 0; n < entryCount; n++ { + for n := range entryCount { testKey = fmt.Sprintf("testKey-%d", n) require.NoError(b, client.Delete(ctx, testKey)) } @@ -536,7 +536,7 @@ func BenchmarkClientSetLargeDB(b *testing.B) { testKey = "testKey" testValue := []byte("testValue") b.ResetTimer() - for 
n := 0; n < b.N; n++ { + for range b.N { require.NoError(b, client.Set(ctx, testKey, testValue)) } } @@ -560,7 +560,7 @@ func BenchmarkClientInitLargeDB(b *testing.B) { ctx := context.Background() - for n := 0; n < entryCount; n++ { + for n := range entryCount { testKey = fmt.Sprintf("testKey-%d", n) require.NoError(b, client.Set(ctx, testKey, entry)) } @@ -570,7 +570,7 @@ func BenchmarkClientInitLargeDB(b *testing.B) { var tempClient *fileStorageClient b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { tempClient, err = newClient(zap.NewNop(), dbFile, time.Second, &CompactionConfig{}, false) require.NoError(b, err) b.StopTimer() @@ -597,13 +597,13 @@ func BenchmarkClientCompactLargeDBFile(b *testing.B) { ctx := context.Background() - for n := 0; n < entryCount; n++ { + for n := range entryCount { testKey = fmt.Sprintf("testKey-%d", n) require.NoError(b, client.Set(ctx, testKey, entry)) } // Leave one key in the db - for n := 0; n < entryCount-1; n++ { + for n := range entryCount - 1 { testKey = fmt.Sprintf("testKey-%d", n) require.NoError(b, client.Delete(ctx, testKey)) } @@ -612,7 +612,7 @@ func BenchmarkClientCompactLargeDBFile(b *testing.B) { b.ResetTimer() b.StopTimer() - for n := 0; n < b.N; n++ { + for n := range b.N { testDbFile := filepath.Join(tempDir, fmt.Sprintf("my_db%d", n)) err = os.Link(dbFile, testDbFile) require.NoError(b, err) @@ -641,13 +641,13 @@ func BenchmarkClientCompactDb(b *testing.B) { ctx := context.Background() - for n := 0; n < entryCount; n++ { + for n := range entryCount { testKey = fmt.Sprintf("testKey-%d", n) require.NoError(b, client.Set(ctx, testKey, entry)) } // Leave half the keys in the DB - for n := 0; n < entryCount/2; n++ { + for n := range entryCount / 2 { testKey = fmt.Sprintf("testKey-%d", n) require.NoError(b, client.Delete(ctx, testKey)) } @@ -656,7 +656,7 @@ func BenchmarkClientCompactDb(b *testing.B) { b.ResetTimer() b.StopTimer() - for n := 0; n < b.N; n++ { + for n := range b.N { testDbFile := filepath.Join(tempDir, fmt.Sprintf("my_db%d", n)) err = os.Link(dbFile, testDbFile) require.NoError(b, err) diff --git a/extension/storage/filestorage/extension_test.go b/extension/storage/filestorage/extension_test.go index cc67b9be9f657..5adaa7e0ece7c 100644 --- a/extension/storage/filestorage/extension_test.go +++ b/extension/storage/filestorage/extension_test.go @@ -62,28 +62,28 @@ func TestExtensionIntegrity(t *testing.T) { myBytes := []byte(n.Name()) // Set my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Set(ctx, keys[i], myBytes) require.NoError(t, err) } // Repeatedly thrash client - for j := 0; j < 100; j++ { + for range 100 { // Make sure my values are still mine - for i := 0; i < len(keys); i++ { + for i := range keys { v, err := c.Get(ctx, keys[i]) require.NoError(t, err) require.Equal(t, myBytes, v) } // Delete my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Delete(ctx, keys[i]) require.NoError(t, err) } // Reset my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Set(ctx, keys[i], myBytes) require.NoError(t, err) } @@ -335,7 +335,6 @@ func TestCompaction(t *testing.T) { require.NoError(t, err) var key string - var i int // magic numbers giving enough data to force bbolt to allocate a new page // see https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/9004 for some discussion @@ -344,7 +343,7 @@ func TestCompaction(t *testing.T) { entry := make([]byte, entrySize) // add the data to the db - for i = 0; i < numEntries; i++ { 
+ for i := range numEntries { key = fmt.Sprintf("key_%d", i) err = client.Set(ctx, key, entry) require.NoError(t, err) @@ -365,7 +364,7 @@ func TestCompaction(t *testing.T) { require.Less(t, stats.Size(), newStats.Size()) // remove data from database - for i = 0; i < numEntries; i++ { + for i := range numEntries { key = fmt.Sprintf("key_%d", i) err = c.Delete(ctx, key) require.NoError(t, err) diff --git a/extension/storage/redisstorageextension/extension_test.go b/extension/storage/redisstorageextension/extension_test.go index 6e06154870a33..253f0d4fd408b 100644 --- a/extension/storage/redisstorageextension/extension_test.go +++ b/extension/storage/redisstorageextension/extension_test.go @@ -54,28 +54,28 @@ func TestExtensionIntegrity(t *testing.T) { myBytes := []byte(n.Name()) // Set my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Set(ctx, keys[i], myBytes) require.NoError(t, err) } // Repeatedly thrash client - for j := 0; j < 100; j++ { + for range 100 { // Make sure my values are still mine - for i := 0; i < len(keys); i++ { + for i := range keys { v, err := c.Get(ctx, keys[i]) require.NoError(t, err) require.Equal(t, myBytes, v) } // Delete my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Delete(ctx, keys[i]) require.NoError(t, err) } // Reset my values - for i := 0; i < len(keys); i++ { + for i := range keys { err := c.Set(ctx, keys[i], myBytes) require.NoError(t, err) } diff --git a/internal/aws/containerinsight/utils_test.go b/internal/aws/containerinsight/utils_test.go index 0a702d7f5d76a..50c9467115cd4 100644 --- a/internal/aws/containerinsight/utils_test.go +++ b/internal/aws/containerinsight/utils_test.go @@ -155,10 +155,10 @@ func checkMetricsAreExpected(t *testing.T, md pmetric.Metrics, fields map[string // check the metrics are expected ilms := rm.ScopeMetrics() - for j := 0; j < ilms.Len(); j++ { + for j := range ilms.Len() { ilm := ilms.At(j) ms := ilm.Metrics() - for k := 0; k < ms.Len(); k++ { + for k := range ms.Len() { m := ms.At(k) metricName := m.Name() log.Printf("metric=%v", metricName) diff --git a/internal/aws/cwlogs/pusher_test.go b/internal/aws/cwlogs/pusher_test.go index dd757e2095dbd..792b6620ec7ae 100644 --- a/internal/aws/cwlogs/pusher_test.go +++ b/internal/aws/cwlogs/pusher_test.go @@ -88,7 +88,7 @@ func TestLogEventBatch_sortLogEvents(t *testing.T) { }, } - for i := 0; i < totalEvents; i++ { + for i := range totalEvents { timestamp := rand.Int() logEvent := NewEvent( int64(timestamp), @@ -150,7 +150,7 @@ func TestPusher_addLogEventBatch(t *testing.T) { c := cap(p.logEventBatch.putLogEventsInput.LogEvents) logEvent := NewEvent(timestampMs, msg) - for i := 0; i < c; i++ { + for range c { p.logEventBatch.putLogEventsInput.LogEvents = append(p.logEventBatch.putLogEventsInput.LogEvents, logEvent.InputLogEvent) } diff --git a/internal/aws/metrics/metric_calculator_test.go b/internal/aws/metrics/metric_calculator_test.go index 58a032582e1e6..74273c440eb30 100644 --- a/internal/aws/metrics/metric_calculator_test.go +++ b/internal/aws/metrics/metric_calculator_test.go @@ -38,7 +38,7 @@ func TestFloat64RateCalculatorWithTooFrequentUpdate(t *testing.T) { assert.Equal(t, float64(0), r) nextTime := initTime - for i := 0; i < 10; i++ { + for range 10 { nextTime = nextTime.Add(5 * time.Millisecond) r, ok = c.Calculate(mKey, float64(105), nextTime) assert.False(t, ok) @@ -158,7 +158,7 @@ func TestMapWithExpiryConcurrency(t *testing.T) { var wg sync.WaitGroup wg.Add(2) go func() { - for i := 0; i < 30; i++ { + 
for range 30 { store.Lock() sum, _ := store.Get(Key{MetricMetadata: "sum"}) newSum := MetricValue{ @@ -171,7 +171,7 @@ func TestMapWithExpiryConcurrency(t *testing.T) { }() go func() { - for i := 0; i < 30; i++ { + for range 30 { store.Lock() sum, _ := store.Get(Key{MetricMetadata: "sum"}) newSum := MetricValue{ diff --git a/internal/coreinternal/aggregateutil/aggregate.go b/internal/coreinternal/aggregateutil/aggregate.go index 198ca09bee0f2..c0fe98f0957e1 100644 --- a/internal/coreinternal/aggregateutil/aggregate.go +++ b/internal/coreinternal/aggregateutil/aggregate.go @@ -103,35 +103,35 @@ func RangeDataPointAttributes(metric pmetric.Metric, f func(pcommon.Map) bool) { //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge: - for i := 0; i < metric.Gauge().DataPoints().Len(); i++ { + for i := range metric.Gauge().DataPoints().Len() { dp := metric.Gauge().DataPoints().At(i) if !f(dp.Attributes()) { return } } case pmetric.MetricTypeSum: - for i := 0; i < metric.Sum().DataPoints().Len(); i++ { + for i := range metric.Sum().DataPoints().Len() { dp := metric.Sum().DataPoints().At(i) if !f(dp.Attributes()) { return } } case pmetric.MetricTypeHistogram: - for i := 0; i < metric.Histogram().DataPoints().Len(); i++ { + for i := range metric.Histogram().DataPoints().Len() { dp := metric.Histogram().DataPoints().At(i) if !f(dp.Attributes()) { return } } case pmetric.MetricTypeExponentialHistogram: - for i := 0; i < metric.ExponentialHistogram().DataPoints().Len(); i++ { + for i := range metric.ExponentialHistogram().DataPoints().Len() { dp := metric.ExponentialHistogram().DataPoints().At(i) if !f(dp.Attributes()) { return } } case pmetric.MetricTypeSummary: - for i := 0; i < metric.Summary().DataPoints().Len(); i++ { + for i := range metric.Summary().DataPoints().Len() { dp := metric.Summary().DataPoints().At(i) if !f(dp.Attributes()) { return @@ -271,7 +271,7 @@ func mergeHistogramDataPoints(dpsMap map[string]pmetric.HistogramDataPointSlice, if dp.HasMax() && dp.Max() < dps.At(i).Max() { dp.SetMax(dps.At(i).Max()) } - for b := 0; b < dps.At(i).BucketCounts().Len(); b++ { + for b := range dps.At(i).BucketCounts().Len() { counts.SetAt(b, counts.At(b)+dps.At(i).BucketCounts().At(b)) } dps.At(i).Exemplars().MoveAndAppendTo(dp.Exemplars()) @@ -302,10 +302,10 @@ func mergeExponentialHistogramDataPoints(dpsMap map[string]pmetric.ExponentialHi if dp.HasMax() && dp.Max() < dps.At(i).Max() { dp.SetMax(dps.At(i).Max()) } - for b := 0; b < dps.At(i).Negative().BucketCounts().Len(); b++ { + for b := range dps.At(i).Negative().BucketCounts().Len() { negatives.SetAt(b, negatives.At(b)+dps.At(i).Negative().BucketCounts().At(b)) } - for b := 0; b < dps.At(i).Positive().BucketCounts().Len(); b++ { + for b := range dps.At(i).Positive().BucketCounts().Len() { positives.SetAt(b, positives.At(b)+dps.At(i).Positive().BucketCounts().At(b)) } dps.At(i).Exemplars().MoveAndAppendTo(dp.Exemplars()) @@ -320,7 +320,7 @@ func groupNumberDataPoints(dps pmetric.NumberDataPointSlice, useStartTime bool, dpsByAttrsAndTs map[string]pmetric.NumberDataPointSlice, ) { var keyHashParts []any - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { if useStartTime { keyHashParts = []any{dps.At(i).StartTimestamp().String()} } @@ -335,10 +335,10 @@ func groupNumberDataPoints(dps pmetric.NumberDataPointSlice, useStartTime bool, func groupHistogramDataPoints(dps pmetric.HistogramDataPointSlice, useStartTime bool, dpsByAttrsAndTs map[string]pmetric.HistogramDataPointSlice, ) { - for i := 0; i < dps.Len(); i++ { + 
for i := range dps.Len() { dp := dps.At(i) keyHashParts := make([]any, 0, dp.ExplicitBounds().Len()+4) - for b := 0; b < dp.ExplicitBounds().Len(); b++ { + for b := range dp.ExplicitBounds().Len() { keyHashParts = append(keyHashParts, dp.ExplicitBounds().At(b)) } if useStartTime { @@ -357,7 +357,7 @@ func groupHistogramDataPoints(dps pmetric.HistogramDataPointSlice, useStartTime func groupExponentialHistogramDataPoints(dps pmetric.ExponentialHistogramDataPointSlice, useStartTime bool, dpsByAttrsAndTs map[string]pmetric.ExponentialHistogramDataPointSlice, ) { - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) keyHashParts := make([]any, 0, 5) keyHashParts = append(keyHashParts, dp.Scale(), dp.HasMin(), dp.HasMax(), uint32(dp.Flags()), dp.Negative().Offset(), diff --git a/internal/coreinternal/goldendataset/metrics_gen.go b/internal/coreinternal/goldendataset/metrics_gen.go index fea84d9abf4ff..866effe5fe188 100644 --- a/internal/coreinternal/goldendataset/metrics_gen.go +++ b/internal/coreinternal/goldendataset/metrics_gen.go @@ -80,10 +80,10 @@ func (g *metricGenerator) genMetricFromCfg(cfg MetricsCfg) pmetric.Metrics { md := pmetric.NewMetrics() rms := md.ResourceMetrics() rms.EnsureCapacity(cfg.NumResourceMetrics) - for i := 0; i < cfg.NumResourceMetrics; i++ { + for range cfg.NumResourceMetrics { rm := rms.AppendEmpty() resource := rm.Resource() - for j := 0; j < cfg.NumResourceAttrs; j++ { + for j := range cfg.NumResourceAttrs { resource.Attributes().PutStr( fmt.Sprintf("resource-attr-name-%d", j), fmt.Sprintf("resource-attr-val-%d", j), @@ -97,7 +97,7 @@ func (g *metricGenerator) genMetricFromCfg(cfg MetricsCfg) pmetric.Metrics { func (g *metricGenerator) populateIlm(cfg MetricsCfg, rm pmetric.ResourceMetrics) { ilms := rm.ScopeMetrics() ilms.EnsureCapacity(cfg.NumILMPerResource) - for i := 0; i < cfg.NumILMPerResource; i++ { + for range cfg.NumILMPerResource { ilm := ilms.AppendEmpty() g.populateMetrics(cfg, ilm) } @@ -106,7 +106,7 @@ func (g *metricGenerator) populateIlm(cfg MetricsCfg, rm pmetric.ResourceMetrics func (g *metricGenerator) populateMetrics(cfg MetricsCfg, ilm pmetric.ScopeMetrics) { metrics := ilm.Metrics() metrics.EnsureCapacity(cfg.NumMetricsPerILM) - for i := 0; i < cfg.NumMetricsPerILM; i++ { + for range cfg.NumMetricsPerILM { metric := metrics.AppendEmpty() g.populateMetricDesc(cfg, metric) //exhaustive:enforce @@ -141,7 +141,7 @@ func (g *metricGenerator) populateMetricDesc(cfg MetricsCfg, metric pmetric.Metr func populateNumberPoints(cfg MetricsCfg, pts pmetric.NumberDataPointSlice) { pts.EnsureCapacity(cfg.NumPtsPerMetric) - for i := 0; i < cfg.NumPtsPerMetric; i++ { + for i := range cfg.NumPtsPerMetric { pt := pts.AppendEmpty() pt.SetStartTimestamp(pcommon.Timestamp(cfg.StartTime)) pt.SetTimestamp(getTimestamp(cfg.StartTime, cfg.StepSize, i)) @@ -160,7 +160,7 @@ func populateNumberPoints(cfg MetricsCfg, pts pmetric.NumberDataPointSlice) { func populateDoubleHistogram(cfg MetricsCfg, dh pmetric.Histogram) { pts := dh.DataPoints() pts.EnsureCapacity(cfg.NumPtsPerMetric) - for i := 0; i < cfg.NumPtsPerMetric; i++ { + for i := range cfg.NumPtsPerMetric { pt := pts.AppendEmpty() pt.SetStartTimestamp(pcommon.Timestamp(cfg.StartTime)) ts := getTimestamp(cfg.StartTime, cfg.StepSize, i) @@ -168,7 +168,7 @@ func populateDoubleHistogram(cfg MetricsCfg, dh pmetric.Histogram) { populatePtAttributes(cfg, pt.Attributes()) setDoubleHistogramBounds(pt, 1, 2, 3, 4, 5) addDoubleHistogramVal(pt, 1) - for i := 0; i < cfg.PtVal; i++ { + for range 
cfg.PtVal { addDoubleHistogramVal(pt, 3) } addDoubleHistogramVal(pt, 5) @@ -187,7 +187,7 @@ func addDoubleHistogramVal(hdp pmetric.HistogramDataPoint, val float64) { // TODO: HasSum, Min, HasMin, Max, HasMax are not covered in tests. buckets := hdp.BucketCounts() bounds := hdp.ExplicitBounds() - for i := 0; i < bounds.Len(); i++ { + for i := range bounds.Len() { bound := bounds.At(i) if val <= bound { buckets.SetAt(i, buckets.At(i)+1) @@ -197,7 +197,7 @@ func addDoubleHistogramVal(hdp pmetric.HistogramDataPoint, val float64) { } func populatePtAttributes(cfg MetricsCfg, lm pcommon.Map) { - for i := 0; i < cfg.NumPtLabels; i++ { + for i := range cfg.NumPtLabels { k := fmt.Sprintf("pt-label-key-%d", i) v := fmt.Sprintf("pt-label-val-%d", i) lm.PutStr(k, v) @@ -211,7 +211,7 @@ func getTimestamp(startTime uint64, stepSize uint64, i int) pcommon.Timestamp { func populateExpoHistogram(cfg MetricsCfg, dh pmetric.ExponentialHistogram) { pts := dh.DataPoints() pts.EnsureCapacity(cfg.NumPtsPerMetric) - for i := 0; i < cfg.NumPtsPerMetric; i++ { + for i := range cfg.NumPtsPerMetric { pt := pts.AppendEmpty() pt.SetStartTimestamp(pcommon.Timestamp(cfg.StartTime)) ts := getTimestamp(cfg.StartTime, cfg.StepSize, i) diff --git a/internal/coreinternal/goldendataset/span_generator.go b/internal/coreinternal/goldendataset/span_generator.go index 71720b1544432..95b84ff941762 100644 --- a/internal/coreinternal/goldendataset/span_generator.go +++ b/internal/coreinternal/goldendataset/span_generator.go @@ -42,7 +42,7 @@ func appendSpans(count int, pictFile string, random io.Reader, spanList ptrace.S var spanInputs *PICTSpanInputs var traceID pcommon.TraceID var parentID pcommon.SpanID - for i := 0; i < count; i++ { + for range count { if index >= pairsTotal { index = 1 } @@ -375,14 +375,14 @@ func appendMaxCountAttributes(includeStatus bool, attrMap pcommon.Map) { func appendSpanEvents(eventCnt PICTInputSpanChild, spanEvents ptrace.SpanEventSlice) { listSize := calculateListSize(eventCnt) - for i := 0; i < listSize; i++ { + for i := range listSize { appendSpanEvent(i, spanEvents) } } func appendSpanLinks(linkCnt PICTInputSpanChild, random io.Reader, spanLinks ptrace.SpanLinkSlice) { listSize := calculateListSize(linkCnt) - for i := 0; i < listSize; i++ { + for i := range listSize { appendSpanLink(random, i, spanLinks) } } diff --git a/internal/coreinternal/goldendataset/traces_generator.go b/internal/coreinternal/goldendataset/traces_generator.go index 8d8ad977ad68b..a35d07a555c69 100644 --- a/internal/coreinternal/goldendataset/traces_generator.go +++ b/internal/coreinternal/goldendataset/traces_generator.go @@ -91,7 +91,7 @@ func appendScopeSpans(tracingInputs *PICTTracingInputs, spanPairsFile string, case LibraryTwo: count = 2 } - for i := 0; i < count; i++ { + for i := range count { err := fillScopeSpans(tracingInputs, i, spanPairsFile, random, scopeSpansSlice.AppendEmpty()) if err != nil { return err diff --git a/internal/coreinternal/metricstestutil/metric_diff.go b/internal/coreinternal/metricstestutil/metric_diff.go index 8fc5a20b9eac3..35a4e69f736dc 100644 --- a/internal/coreinternal/metricstestutil/metric_diff.go +++ b/internal/coreinternal/metricstestutil/metric_diff.go @@ -37,7 +37,7 @@ func diffRMSlices(sent []pmetric.ResourceMetrics, recd []pmetric.ResourceMetrics Msg: "Sent vs received ResourceMetrics not equal length", }} } - for i := 0; i < len(sent); i++ { + for i := range sent { sentRM := sent[i] recdRM := recd[i] diffs = diffRMs(diffs, sentRM, recdRM) @@ -65,7 +65,7 @@ func diffILMSlice( if 
mismatch { return diffs } - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { diffs = diffILM(diffs, expected.At(i), actual.At(i)) } return diffs @@ -85,7 +85,7 @@ func diffMetrics(diffs []*MetricDiff, expected pmetric.MetricSlice, actual pmetr if mismatch { return diffs } - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { diffs = DiffMetric(diffs, expected.At(i), actual.At(i)) } return diffs @@ -98,7 +98,7 @@ func diffMetricData(expected, actual pmetric.Metrics) []*MetricDiff { } func toSlice(s pmetric.ResourceMetricsSlice) (out []pmetric.ResourceMetrics) { - for i := 0; i < s.Len(); i++ { + for i := range s.Len() { out = append(out, s.At(i)) } return out @@ -154,7 +154,7 @@ func diffNumberPts( if mismatch { return diffs } - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { exPt := expected.At(i) acPt := actual.At(i) @@ -184,7 +184,7 @@ func diffHistogramPts( if mismatch { return diffs } - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { diffs = diffHistogramPt(diffs, expected.At(i), actual.At(i)) } return diffs @@ -214,7 +214,7 @@ func diffExponentialHistogramPts( if mismatch { return diffs } - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { diffs = diffExponentialHistogramPt(diffs, expected.At(i), actual.At(i)) } return diffs @@ -254,7 +254,7 @@ func diffExponentialHistogramPtBuckets( if mod { return diffs } - for i := 0; i < exC.Len(); i++ { + for i := range exC.Len() { diffs = diff(diffs, exC.At(i), acC.At(i), fmt.Sprintf("ExponentialHistogramDataPoint Buckets Count[%d]", i)) } return diffs @@ -270,7 +270,7 @@ func diffExemplars( if mismatch { return diffs } - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { diffs = diff(diffs, expected.At(i).ValueType(), actual.At(i).ValueType(), "Exemplar Value Type") switch expected.At(i).ValueType() { case pmetric.ExemplarValueTypeInt: diff --git a/internal/coreinternal/parseutils/uri_test.go b/internal/coreinternal/parseutils/uri_test.go index 6fdb5612ce742..c79b146b367f3 100644 --- a/internal/coreinternal/parseutils/uri_test.go +++ b/internal/coreinternal/parseutils/uri_test.go @@ -456,7 +456,7 @@ func BenchmarkURLToMap(b *testing.B) { v := "https://dev:password@www.golang.org:8443/v1/app/stage?token=d9e28b1d-2c7b-4853-be6a-d94f34a5d4ab&env=prod&env=stage&token=c6fa29f9-a31b-4584-b98d-aa8473b0e18d&region=us-east1b&mode=fast" u, err := url.ParseRequestURI(v) require.NoError(b, err) - for n := 0; n < b.N; n++ { + for range b.N { _, _ = urlToMap(u, m) } } @@ -466,7 +466,7 @@ func BenchmarkQueryToMap(b *testing.B) { v := "?token=d9e28b1d-2c7b-4853-be6a-d94f34a5d4ab&env=prod&env=stage&token=c6fa29f9-a31b-4584-b98d-aa8473b0e18d&region=us-east1b&mode=fast" u, err := url.ParseQuery(v) require.NoError(b, err) - for n := 0; n < b.N; n++ { + for range b.N { queryToMap(u, m) } } @@ -476,7 +476,7 @@ func BenchmarkQueryParamValuesToMap(b *testing.B) { "d9e28b1d-2c7b-4853-be6a-d94f34a5d4ab", "c6fa29f9-a31b-4584-b98d-aa8473b0e18", } - for n := 0; n < b.N; n++ { + for range b.N { queryParamValuesToMap(v) } } diff --git a/internal/exp/metrics/metrics.go b/internal/exp/metrics/metrics.go index affdf76fa77f9..acda6582cd6dd 100644 --- a/internal/exp/metrics/metrics.go +++ b/internal/exp/metrics/metrics.go @@ -25,11 +25,11 @@ import ( // That said, this will do a large amount of memory copying func Merge(mdA pmetric.Metrics, mdB pmetric.Metrics) pmetric.Metrics { outer: - for i := 0; i < mdB.ResourceMetrics().Len(); i++ { + for i := range 
mdB.ResourceMetrics().Len() { rmB := mdB.ResourceMetrics().At(i) resourceIDB := identity.OfResource(rmB.Resource()) - for j := 0; j < mdA.ResourceMetrics().Len(); j++ { + for j := range mdA.ResourceMetrics().Len() { rmA := mdA.ResourceMetrics().At(j) resourceIDA := identity.OfResource(rmA.Resource()) @@ -50,11 +50,11 @@ outer: func mergeResourceMetrics(resourceID identity.Resource, rmA pmetric.ResourceMetrics, rmB pmetric.ResourceMetrics) pmetric.ResourceMetrics { outer: - for i := 0; i < rmB.ScopeMetrics().Len(); i++ { + for i := range rmB.ScopeMetrics().Len() { smB := rmB.ScopeMetrics().At(i) scopeIDB := identity.OfScope(resourceID, smB.Scope()) - for j := 0; j < rmA.ScopeMetrics().Len(); j++ { + for j := range rmA.ScopeMetrics().Len() { smA := rmA.ScopeMetrics().At(j) scopeIDA := identity.OfScope(resourceID, smA.Scope()) @@ -75,11 +75,11 @@ outer: func mergeScopeMetrics(scopeID identity.Scope, smA pmetric.ScopeMetrics, smB pmetric.ScopeMetrics) pmetric.ScopeMetrics { outer: - for i := 0; i < smB.Metrics().Len(); i++ { + for i := range smB.Metrics().Len() { mB := smB.Metrics().At(i) metricIDB := identity.OfMetric(scopeID, mB) - for j := 0; j < smA.Metrics().Len(); j++ { + for j := range smA.Metrics().Len() { mA := smA.Metrics().At(j) metricIDA := identity.OfMetric(scopeID, mA) @@ -113,7 +113,7 @@ outer: func mergeDataPoints[DPS dataPointSlice[DP], DP dataPoint[DP]](dataPointsA DPS, dataPointsB DPS) DPS { // Append all the datapoints from B to A - for i := 0; i < dataPointsB.Len(); i++ { + for i := range dataPointsB.Len() { dpB := dataPointsB.At(i) newDP := dataPointsA.AppendEmpty() diff --git a/internal/exp/metrics/metrics_test.go b/internal/exp/metrics/metrics_test.go index 0ee0188a13284..caaec8e6dd7ca 100644 --- a/internal/exp/metrics/metrics_test.go +++ b/internal/exp/metrics/metrics_test.go @@ -50,7 +50,7 @@ func TestMergeMetrics(t *testing.T) { } func naiveMerge(mdA pmetric.Metrics, mdB pmetric.Metrics) pmetric.Metrics { - for i := 0; i < mdB.ResourceMetrics().Len(); i++ { + for i := range mdB.ResourceMetrics().Len() { rm := mdB.ResourceMetrics().At(i) rmCopy := mdA.ResourceMetrics().AppendEmpty() @@ -82,7 +82,7 @@ func BenchmarkMergeManyIntoSingle(b *testing.B) { mdB := generateMetrics(b, 10000) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() mdA := pmetric.NewMetrics() mdAClean.CopyTo(mdA) @@ -115,7 +115,7 @@ func BenchmarkMergeManyIntoMany(b *testing.B) { mdB := generateMetrics(b, 10000) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() mdA := pmetric.NewMetrics() mdAClean.CopyTo(mdA) @@ -136,7 +136,7 @@ func generateMetrics(t require.TestingT, rmCount int) pmetric.Metrics { timeStamp := pcommon.Timestamp(rand.IntN(256)) value := rand.Int64N(256) - for i := 0; i < rmCount; i++ { + for range rmCount { rm := md.ResourceMetrics().AppendEmpty() err := rm.Resource().Attributes().FromRaw(map[string]any{ conventions.AttributeServiceName: "service-test", diff --git a/internal/filter/filterexpr/matcher.go b/internal/filter/filterexpr/matcher.go index 2b4126e040a4d..de6b2be236056 100644 --- a/internal/filter/filterexpr/matcher.go +++ b/internal/filter/filterexpr/matcher.go @@ -70,7 +70,7 @@ func (m *Matcher) MatchMetric(metric pmetric.Metric) (bool, error) { func (m *Matcher) matchGauge(metricName string, gauge pmetric.Gauge, vm *vm.VM) (bool, error) { pts := gauge.DataPoints() - for i := 0; i < pts.Len(); i++ { + for i := range pts.Len() { matched, err := m.matchEnv(metricName, pmetric.MetricTypeGauge, pts.At(i).Attributes(), vm) if 
err != nil { return false, err @@ -84,7 +84,7 @@ func (m *Matcher) matchGauge(metricName string, gauge pmetric.Gauge, vm *vm.VM) func (m *Matcher) matchSum(metricName string, sum pmetric.Sum, vm *vm.VM) (bool, error) { pts := sum.DataPoints() - for i := 0; i < pts.Len(); i++ { + for i := range pts.Len() { matched, err := m.matchEnv(metricName, pmetric.MetricTypeSum, pts.At(i).Attributes(), vm) if err != nil { return false, err @@ -98,7 +98,7 @@ func (m *Matcher) matchSum(metricName string, sum pmetric.Sum, vm *vm.VM) (bool, func (m *Matcher) matchHistogram(metricName string, histogram pmetric.Histogram, vm *vm.VM) (bool, error) { pts := histogram.DataPoints() - for i := 0; i < pts.Len(); i++ { + for i := range pts.Len() { matched, err := m.matchEnv(metricName, pmetric.MetricTypeHistogram, pts.At(i).Attributes(), vm) if err != nil { return false, err @@ -112,7 +112,7 @@ func (m *Matcher) matchHistogram(metricName string, histogram pmetric.Histogram, func (m *Matcher) matchExponentialHistogram(metricName string, eh pmetric.ExponentialHistogram, vm *vm.VM) (bool, error) { pts := eh.DataPoints() - for i := 0; i < pts.Len(); i++ { + for i := range pts.Len() { matched, err := m.matchEnv(metricName, pmetric.MetricTypeExponentialHistogram, pts.At(i).Attributes(), vm) if err != nil { return false, err @@ -126,7 +126,7 @@ func (m *Matcher) matchExponentialHistogram(metricName string, eh pmetric.Expone func (m *Matcher) matchSummary(metricName string, summary pmetric.Summary, vm *vm.VM) (bool, error) { pts := summary.DataPoints() - for i := 0; i < pts.Len(); i++ { + for i := range pts.Len() { matched, err := m.matchEnv(metricName, pmetric.MetricTypeSummary, pts.At(i).Attributes(), vm) if err != nil { return false, err diff --git a/internal/filter/filterexpr/matcher_test.go b/internal/filter/filterexpr/matcher_test.go index ca26ec7e81b12..fc7a51e894bb5 100644 --- a/internal/filter/filterexpr/matcher_test.go +++ b/internal/filter/filterexpr/matcher_test.go @@ -224,7 +224,7 @@ func TestParallel(t *testing.T) { testMetric := func(t *testing.T, count int) { defer wg.Done() <-start - for i := 0; i < count; i++ { + for range count { m := pmetric.NewMetric() m.SetName("my.metric") m.SetEmptySum().DataPoints().AppendEmpty() @@ -234,7 +234,7 @@ func TestParallel(t *testing.T) { } } - for i := 0; i < 20; i++ { + for range 20 { wg.Add(1) go testMetric(t, 20) } diff --git a/internal/filter/filterlog/filterlog_test.go b/internal/filter/filterlog/filterlog_test.go index ebfd5e483d3c1..9bba3aa31b664 100644 --- a/internal/filter/filterlog/filterlog_test.go +++ b/internal/filter/filterlog/filterlog_test.go @@ -1381,7 +1381,7 @@ func BenchmarkFilterlog_NewSkipExpr(b *testing.B) { tCtx := ottllog.NewTransformContext(log, scope, resource, plog.NewScopeLogs(), plog.NewResourceLogs()) b.Run(tt.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { var skip bool skip, err = skipExpr.Eval(context.Background(), tCtx) assert.NoError(b, err) diff --git a/internal/filter/filtermatcher/attributematcher_test.go b/internal/filter/filtermatcher/attributematcher_test.go index 4ae706b774297..2732ad363331f 100644 --- a/internal/filter/filtermatcher/attributematcher_test.go +++ b/internal/filter/filtermatcher/attributematcher_test.go @@ -62,7 +62,7 @@ func BenchmarkMatchAttributes(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { matcher.Match(matchingMap) matcher.Match(notMatchingMap) } diff --git a/internal/filter/filterottl/functions.go 
b/internal/filter/filterottl/functions.go index 3612d184966fa..c6b189fbfc3ee 100644 --- a/internal/filter/filterottl/functions.go +++ b/internal/filter/filterottl/functions.go @@ -124,7 +124,7 @@ func checkDataPoints(tCtx ottlmetric.TransformContext, key string, expectedVal * } func checkNumberDataPointSlice(dps pmetric.NumberDataPointSlice, key string, expectedVal *string) bool { - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) value, ok := dp.Attributes().Get(key) if ok { @@ -138,7 +138,7 @@ func checkNumberDataPointSlice(dps pmetric.NumberDataPointSlice, key string, exp } func checkHistogramDataPointSlice(dps pmetric.HistogramDataPointSlice, key string, expectedVal *string) bool { - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) value, ok := dp.Attributes().Get(key) if ok { @@ -152,7 +152,7 @@ func checkHistogramDataPointSlice(dps pmetric.HistogramDataPointSlice, key strin } func checkExponentialHistogramDataPointSlice(dps pmetric.ExponentialHistogramDataPointSlice, key string, expectedVal *string) bool { - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) value, ok := dp.Attributes().Get(key) if ok { @@ -166,7 +166,7 @@ func checkExponentialHistogramDataPointSlice(dps pmetric.ExponentialHistogramDat } func checkSummaryDataPointSlice(dps pmetric.SummaryDataPointSlice, key string, expectedVal *string) bool { - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) value, ok := dp.Attributes().Get(key) if ok { diff --git a/internal/filter/filterspan/filterspan_test.go b/internal/filter/filterspan/filterspan_test.go index b06171543c1a3..d718c783fbaeb 100644 --- a/internal/filter/filterspan/filterspan_test.go +++ b/internal/filter/filterspan/filterspan_test.go @@ -1281,7 +1281,7 @@ func BenchmarkFilterspan_NewSkipExpr(b *testing.B) { tCtx := ottlspan.NewTransformContext(span, scope, resource, ptrace.NewScopeSpans(), ptrace.NewResourceSpans()) b.Run(tt.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { var skip bool skip, err = skipExpr.Eval(context.Background(), tCtx) assert.NoError(b, err) diff --git a/internal/metadataproviders/k8snode/metadata_test.go b/internal/metadataproviders/k8snode/metadata_test.go index 65fc2ffbb3f6e..37b63e86c46c0 100644 --- a/internal/metadataproviders/k8snode/metadata_test.go +++ b/internal/metadataproviders/k8snode/metadata_test.go @@ -80,7 +80,7 @@ func TestNodeUID(t *testing.T) { } func setupNodes(client *fake.Clientset) error { - for i := 0; i < 3; i++ { + for i := range 3 { n := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ UID: types.UID("node" + strconv.Itoa(i)), diff --git a/internal/otelarrow/admission2/notification_test.go b/internal/otelarrow/admission2/notification_test.go index 1e66d8445a414..45a7e1bdceceb 100644 --- a/internal/otelarrow/admission2/notification_test.go +++ b/internal/otelarrow/admission2/notification_test.go @@ -17,7 +17,7 @@ func TestNotification(t *testing.T) { require.False(start.HasBeenNotified()) done := make([]N, 5) - for i := 0; i < 5; i++ { + for i := range 5 { done[i] = newNotification() go func(i int) { start.WaitForNotification() diff --git a/internal/otelarrow/compression/zstd/mru_test.go b/internal/otelarrow/compression/zstd/mru_test.go index d21038eb1d6d0..0a6378106db8a 100644 --- a/internal/otelarrow/compression/zstd/mru_test.go +++ b/internal/otelarrow/compression/zstd/mru_test.go @@ -24,7 +24,7 @@ func TestMRUGet(t *testing.T) { v, g := m.Get() require.Nil(t, v) - for i := 0; i < 
cnt; i++ { + for i := range cnt { p := &gint{ value: i + 1, Gen: g, @@ -32,7 +32,7 @@ func TestMRUGet(t *testing.T) { m.Put(p) } - for i := 0; i < cnt; i++ { + for i := range cnt { v, _ = m.Get() require.Equal(t, 5-i, v.value) } @@ -52,7 +52,7 @@ func TestMRUPut(t *testing.T) { g := m.Reset() - for i := 0; i < cnt; i++ { + for i := range cnt { p := &gint{ value: i + 1, Gen: g, diff --git a/internal/otelarrow/netstats/netstats_test.go b/internal/otelarrow/netstats/netstats_test.go index a7275b9005dd6..b4323d0aac9d9 100644 --- a/internal/otelarrow/netstats/netstats_test.go +++ b/internal/otelarrow/netstats/netstats_test.go @@ -173,7 +173,7 @@ func testNetStatsExporter(t *testing.T, level configtelemetry.Level, expect map[ handler := enr.Handler() ctx := context.Background() - for i := 0; i < 10; i++ { + for range 10 { if apiDirect { // use the direct API enr.CountSend(ctx, SizesStruct{ @@ -313,7 +313,7 @@ func testNetStatsReceiver(t *testing.T, level configtelemetry.Level, expect map[ handler := rer.Handler() ctx := context.Background() - for i := 0; i < 10; i++ { + for range 10 { if apiDirect { // use the direct API rer.CountReceive(ctx, SizesStruct{ @@ -367,7 +367,7 @@ func TestUncompressedSizeBypass(t *testing.T) { handler := enr.Handler() ctx := context.Background() - for i := 0; i < 10; i++ { + for range 10 { // simulate the RPC path handler.HandleRPC(handler.TagRPC(ctx, &stats.RPCTagInfo{ FullMethodName: "my.arrow.v1.method", diff --git a/internal/otelarrow/test/e2e_test.go b/internal/otelarrow/test/e2e_test.go index b6300067b3ed6..312d6703fb55f 100644 --- a/internal/otelarrow/test/e2e_test.go +++ b/internal/otelarrow/test/e2e_test.go @@ -221,7 +221,7 @@ func testIntegrationTraces(ctx context.Context, t *testing.T, tp testParams, cfg expect := make([][]ptrace.Traces, tp.threadCount) - for num := 0; num < tp.threadCount; num++ { + for num := range tp.threadCount { clientDoneWG.Add(1) go func(num int) { defer clientDoneWG.Done() diff --git a/internal/pdatautil/logs.go b/internal/pdatautil/logs.go index 344bc6c2a8a2d..e6e1667203ba0 100644 --- a/internal/pdatautil/logs.go +++ b/internal/pdatautil/logs.go @@ -15,11 +15,11 @@ import ( func FlattenLogs(rls plog.ResourceLogsSlice) { tmp := plog.NewResourceLogsSlice() rls.MoveAndAppendTo(tmp) - for i := 0; i < tmp.Len(); i++ { + for i := range tmp.Len() { groupedResource := tmp.At(i) - for j := 0; j < groupedResource.ScopeLogs().Len(); j++ { + for j := range groupedResource.ScopeLogs().Len() { groupedScope := groupedResource.ScopeLogs().At(j) - for k := 0; k < groupedScope.LogRecords().Len(); k++ { + for k := range groupedScope.LogRecords().Len() { flatResource := rls.AppendEmpty() groupedResource.Resource().Attributes().CopyTo(flatResource.Resource().Attributes()) flatScope := flatResource.ScopeLogs().AppendEmpty() @@ -37,15 +37,15 @@ func FlattenLogs(rls plog.ResourceLogsSlice) { func GroupByResourceLogs(rls plog.ResourceLogsSlice) { // Hash each ResourceLogs based on identifying information. resourceHashes := make([][16]byte, rls.Len()) - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { resourceHashes[i] = pdatautil.MapHash(rls.At(i).Resource().Attributes()) } // Find the first occurrence of each hash and note the index. 
firstScopeIndex := make([]int, rls.Len()) - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { firstScopeIndex[i] = i - for j := 0; j < i; j++ { + for j := range i { if resourceHashes[i] == resourceHashes[j] { firstScopeIndex[i] = j break @@ -54,7 +54,7 @@ func GroupByResourceLogs(rls plog.ResourceLogsSlice) { } // Merge Resources with the same hash. - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { if i == firstScopeIndex[i] { // This is the first occurrence of this hash. continue @@ -71,7 +71,7 @@ func GroupByResourceLogs(rls plog.ResourceLogsSlice) { }) // Merge ScopeLogs within each ResourceLogs. - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { GroupByScopeLogs(rls.At(i).ScopeLogs()) } } @@ -80,15 +80,15 @@ func GroupByResourceLogs(rls plog.ResourceLogsSlice) { func GroupByScopeLogs(sls plog.ScopeLogsSlice) { // Hash each ScopeLogs based on identifying information. scopeHashes := make([][16]byte, sls.Len()) - for i := 0; i < sls.Len(); i++ { + for i := range sls.Len() { scopeHashes[i] = HashScopeLogs(sls.At(i)) } // Find the first occurrence of each hash and note the index. firstScopeIndex := make([]int, sls.Len()) - for i := 0; i < sls.Len(); i++ { + for i := range sls.Len() { firstScopeIndex[i] = i - for j := 0; j < i; j++ { + for j := range i { if scopeHashes[i] == scopeHashes[j] { firstScopeIndex[i] = j break @@ -97,7 +97,7 @@ func GroupByScopeLogs(sls plog.ScopeLogsSlice) { } // Merge ScopeLogs with the same hash. - for i := 0; i < sls.Len(); i++ { + for i := range sls.Len() { if i == firstScopeIndex[i] { // This is the first occurrence of this hash. continue diff --git a/internal/pdatautil/logs_test.go b/internal/pdatautil/logs_test.go index 75b51344a24e5..e5400c333fb4c 100644 --- a/internal/pdatautil/logs_test.go +++ b/internal/pdatautil/logs_test.go @@ -122,7 +122,7 @@ func TestFlattenResourceLogs(t *testing.T) { expected := setupResourceLogsSlice(tc.expected) FlattenLogs(actual) assert.Equal(t, expected.Len(), actual.Len()) - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { assert.NoError(t, plogtest.CompareResourceLogs(expected.At(i), actual.At(i))) } }) @@ -417,7 +417,7 @@ func TestGroupByResourceLogs(t *testing.T) { expected := setupResourceLogsSlice(tc.expected) GroupByResourceLogs(actual) assert.Equal(t, expected.Len(), actual.Len()) - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { assert.NoError(t, plogtest.CompareResourceLogs(expected.At(i), actual.At(i))) } }) @@ -486,7 +486,7 @@ func TestGroupByScopeLogs(t *testing.T) { expected := setupScopeLogsSlice(tc.expected) GroupByScopeLogs(actual) assert.Equal(t, expected.Len(), actual.Len()) - for i := 0; i < expected.Len(); i++ { + for i := range expected.Len() { assert.NoError(t, plogtest.CompareScopeLogs(expected.At(i), actual.At(i))) } }) diff --git a/internal/sqlquery/db_client_test.go b/internal/sqlquery/db_client_test.go index a95521f73da74..8522b5d2f55b3 100644 --- a/internal/sqlquery/db_client_test.go +++ b/internal/sqlquery/db_client_test.go @@ -123,7 +123,7 @@ type fakeRows struct { func (r *fakeRows) ColumnTypes() ([]colType, error) { var out []colType - for i := 0; i < len(r.vals[0]); i++ { + for i := range len(r.vals[0]) { out = append(out, fakeCol{fmt.Sprintf("col_%d", i)}) } return out, nil diff --git a/pkg/batchperresourceattr/batchperresourceattr.go b/pkg/batchperresourceattr/batchperresourceattr.go index 510b41f6fca24..7be6f2019e766 100644 --- a/pkg/batchperresourceattr/batchperresourceattr.go +++ 
b/pkg/batchperresourceattr/batchperresourceattr.go @@ -49,7 +49,7 @@ func (bt *batchTraces) ConsumeTraces(ctx context.Context, td ptrace.Traces) erro } indicesByAttr := make(map[string][]int) - for i := 0; i < lenRss; i++ { + for i := range lenRss { rs := rss.At(i) var attrVal string @@ -112,7 +112,7 @@ func (bt *batchMetrics) ConsumeMetrics(ctx context.Context, td pmetric.Metrics) } indicesByAttr := make(map[string][]int) - for i := 0; i < lenRms; i++ { + for i := range lenRms { rm := rms.At(i) var attrVal string for _, k := range bt.attrKeys { @@ -173,7 +173,7 @@ func (bt *batchLogs) ConsumeLogs(ctx context.Context, td plog.Logs) error { } indicesByAttr := make(map[string][]int) - for i := 0; i < lenRls; i++ { + for i := range lenRls { rl := rls.At(i) var attrVal string for _, k := range bt.attrKeys { diff --git a/pkg/batchperresourceattr/batchperresourceattr_test.go b/pkg/batchperresourceattr/batchperresourceattr_test.go index e1ae568ccc2e0..36ed6b71cdcf4 100644 --- a/pkg/batchperresourceattr/batchperresourceattr_test.go +++ b/pkg/batchperresourceattr/batchperresourceattr_test.go @@ -459,13 +459,13 @@ func BenchmarkBatchPerResourceTraces(b *testing.B) { inBatch := ptrace.NewTraces() rss := inBatch.ResourceSpans() rss.EnsureCapacity(64) - for i := 0; i < 64; i++ { + for i := range 64 { fillResourceSpans(rss.AppendEmpty(), "attr_key", strconv.Itoa(i%8)) } bpr := NewBatchPerResourceTraces("attr_key", consumertest.NewNop()) b.ReportAllocs() b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { if err := bpr.ConsumeTraces(context.Background(), inBatch); err != nil { b.Fail() } @@ -475,13 +475,13 @@ func BenchmarkBatchPerResourceTraces(b *testing.B) { func BenchmarkBatchPerResourceMetrics(b *testing.B) { inBatch := pmetric.NewMetrics() inBatch.ResourceMetrics().EnsureCapacity(64) - for i := 0; i < 64; i++ { + for i := range 64 { fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", strconv.Itoa(i%8)) } bpr := NewBatchPerResourceMetrics("attr_key", consumertest.NewNop()) b.ReportAllocs() b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { if err := bpr.ConsumeMetrics(context.Background(), inBatch); err != nil { b.Fail() } @@ -491,13 +491,13 @@ func BenchmarkBatchPerResourceMetrics(b *testing.B) { func BenchmarkBatchPerResourceLogs(b *testing.B) { inBatch := plog.NewLogs() inBatch.ResourceLogs().EnsureCapacity(64) - for i := 0; i < 64; i++ { + for i := range 64 { fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", strconv.Itoa(i%8)) } bpr := NewBatchPerResourceLogs("attr_key", consumertest.NewNop()) b.ReportAllocs() b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { if err := bpr.ConsumeLogs(context.Background(), inBatch); err != nil { b.Fail() } diff --git a/pkg/batchpersignal/batchpersignal.go b/pkg/batchpersignal/batchpersignal.go index 01b9858a67f94..5980bef42042f 100644 --- a/pkg/batchpersignal/batchpersignal.go +++ b/pkg/batchpersignal/batchpersignal.go @@ -16,15 +16,15 @@ func SplitTraces(batch ptrace.Traces) []ptrace.Traces { // if the same traceID exists in different ils, they land in different batches. 
var result []ptrace.Traces - for i := 0; i < batch.ResourceSpans().Len(); i++ { + for i := range batch.ResourceSpans().Len() { rs := batch.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { // the batches for this ILS batches := map[pcommon.TraceID]ptrace.ResourceSpans{} ils := rs.ScopeSpans().At(j) - for k := 0; k < ils.Spans().Len(); k++ { + for k := range ils.Spans().Len() { span := ils.Spans().At(k) key := span.TraceID() @@ -63,15 +63,15 @@ func SplitLogs(batch plog.Logs) []plog.Logs { // if the same traceID exists in different sl, they land in different batches. var result []plog.Logs - for i := 0; i < batch.ResourceLogs().Len(); i++ { + for i := range batch.ResourceLogs().Len() { rs := batch.ResourceLogs().At(i) - for j := 0; j < rs.ScopeLogs().Len(); j++ { + for j := range rs.ScopeLogs().Len() { // the batches for this ILL batches := map[pcommon.TraceID]plog.ResourceLogs{} sl := rs.ScopeLogs().At(j) - for k := 0; k < sl.LogRecords().Len(); k++ { + for k := range sl.LogRecords().Len() { log := sl.LogRecords().At(k) key := log.TraceID() @@ -110,15 +110,15 @@ func SplitMetrics(batch pmetric.Metrics) []pmetric.Metrics { // if the same metricName exists in different ils, they land in different batches. var result []pmetric.Metrics - for i := 0; i < batch.ResourceMetrics().Len(); i++ { + for i := range batch.ResourceMetrics().Len() { rs := batch.ResourceMetrics().At(i) - for j := 0; j < rs.ScopeMetrics().Len(); j++ { + for j := range rs.ScopeMetrics().Len() { // the batches for this ILS batches := map[string]pmetric.ResourceMetrics{} ils := rs.ScopeMetrics().At(j) - for k := 0; k < ils.Metrics().Len(); k++ { + for k := range ils.Metrics().Len() { metric := ils.Metrics().At(k) // key := pcommon.NewByteSlice() diff --git a/pkg/golden/normalize_timestamps.go b/pkg/golden/normalize_timestamps.go index cb961c4576e17..b1b0f6635ac6e 100644 --- a/pkg/golden/normalize_timestamps.go +++ b/pkg/golden/normalize_timestamps.go @@ -15,9 +15,9 @@ import ( func normalizeTimestamps(metrics pmetric.Metrics) { rms := metrics.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { - for j := 0; j < rms.At(i).ScopeMetrics().Len(); j++ { - for k := 0; k < rms.At(i).ScopeMetrics().At(j).Metrics().Len(); k++ { + for i := range rms.Len() { + for j := range rms.At(i).ScopeMetrics().Len() { + for k := range rms.At(i).ScopeMetrics().At(j).Metrics().Len() { m := rms.At(i).ScopeMetrics().At(j).Metrics().At(k) //exhaustive:enforce switch m.Type() { @@ -87,7 +87,7 @@ type dataPoint interface { func normalizeDataPointSlice[T dataPoint](dps dataPointSlice[T]) { attrCache := make(map[[16]byte]bool) - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { attrHash := pdatautil.MapHash(dps.At(i).Attributes()) if attrCache[attrHash] { continue @@ -103,7 +103,7 @@ func normalizeDataPointSlice[T dataPoint](dps dataPointSlice[T]) { } normalizedTs := normalizeTimeSeries(timeSeries) - for k := 0; k < dps.Len(); k++ { + for k := range dps.Len() { if pdatautil.MapHash(dps.At(k).Attributes()) != attrHash { continue } diff --git a/pkg/golden/sort_metrics.go b/pkg/golden/sort_metrics.go index 062267a8c93b5..38ab93a1e78be 100644 --- a/pkg/golden/sort_metrics.go +++ b/pkg/golden/sort_metrics.go @@ -15,39 +15,39 @@ import ( // sorts all Resource Metrics attributes and Datapoint Slice metric attributes and all Resource, Scope, and Datapoint Slices func sortMetrics(ms pmetric.Metrics) { rms := ms.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { 
sortAttributeMap(rms.At(i).Resource().Attributes()) ilms := rms.At(i).ScopeMetrics() - for j := 0; j < ilms.Len(); j++ { + for j := range ilms.Len() { sortAttributeMap(ilms.At(j).Scope().Attributes()) metricsList := ilms.At(j).Metrics() - for k := 0; k < metricsList.Len(); k++ { + for k := range metricsList.Len() { metric := metricsList.At(k) //exhaustive:enforce switch metricsList.At(k).Type() { case pmetric.MetricTypeGauge: ds := metric.Gauge().DataPoints() - for l := 0; l < ds.Len(); l++ { + for l := range ds.Len() { sortAttributeMap(ds.At(l).Attributes()) } case pmetric.MetricTypeSum: ds := metric.Sum().DataPoints() - for l := 0; l < ds.Len(); l++ { + for l := range ds.Len() { sortAttributeMap(ds.At(l).Attributes()) } case pmetric.MetricTypeHistogram: ds := metric.Histogram().DataPoints() - for l := 0; l < ds.Len(); l++ { + for l := range ds.Len() { sortAttributeMap(ds.At(l).Attributes()) } case pmetric.MetricTypeExponentialHistogram: ds := metric.ExponentialHistogram().DataPoints() - for l := 0; l < ds.Len(); l++ { + for l := range ds.Len() { sortAttributeMap(ds.At(l).Attributes()) } case pmetric.MetricTypeSummary: ds := metric.Summary().DataPoints() - for l := 0; l < ds.Len(); l++ { + for l := range ds.Len() { sortAttributeMap(ds.At(l).Attributes()) } } @@ -95,9 +95,9 @@ func sortAttributeMap(mp pcommon.Map) { // sortMetricDataPointSlices sorts the datapoint slice of a pmetric.Metrics according to the alphanumeric ordering of map key func sortMetricDataPointSlices(ms pmetric.Metrics) { - for i := 0; i < ms.ResourceMetrics().Len(); i++ { - for j := 0; j < ms.ResourceMetrics().At(i).ScopeMetrics().Len(); j++ { - for k := 0; k < ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Len(); k++ { + for i := range ms.ResourceMetrics().Len() { + for j := range ms.ResourceMetrics().At(i).ScopeMetrics().Len() { + for k := range ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Len() { m := ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().At(k) //exhaustive:enforce switch m.Type() { @@ -124,7 +124,7 @@ func sortResources(ms pmetric.Metrics) { } func sortScopes(ms pmetric.Metrics) { - for i := 0; i < ms.ResourceMetrics().Len(); i++ { + for i := range ms.ResourceMetrics().Len() { rm := ms.ResourceMetrics().At(i) rm.ScopeMetrics().Sort(func(a, b pmetric.ScopeMetrics) bool { return compareMaps(a.Scope().Attributes(), b.Scope().Attributes()) < 0 @@ -174,7 +174,7 @@ func compareMaps(a, b pcommon.Map) int { return true }) - for i := 0; i < len(aKeys); i++ { + for i := range aKeys { if aKeys[i] != bKeys[i] { return strings.Compare(aKeys[i], bKeys[i]) } diff --git a/pkg/ottl/compare_test.go b/pkg/ottl/compare_test.go index e051ececb7391..31a14730593cb 100644 --- a/pkg/ottl/compare_test.go +++ b/pkg/ottl/compare_test.go @@ -122,7 +122,7 @@ func BenchmarkCompareEQInt64(b *testing.B) { testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings()) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { testParser.compare(i64a, i64b, eq) } } @@ -131,7 +131,7 @@ func BenchmarkCompareEQFloat(b *testing.B) { testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings()) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { testParser.compare(f64a, f64b, eq) } } @@ -140,7 +140,7 @@ func BenchmarkCompareEQString(b *testing.B) { testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings()) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { 
diff --git a/pkg/ottl/compare_test.go b/pkg/ottl/compare_test.go
index e051ececb7391..31a14730593cb 100644
--- a/pkg/ottl/compare_test.go
+++ b/pkg/ottl/compare_test.go
@@ -122,7 +122,7 @@ func BenchmarkCompareEQInt64(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(i64a, i64b, eq)
 	}
 }
@@ -131,7 +131,7 @@ func BenchmarkCompareEQFloat(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(f64a, f64b, eq)
 	}
 }
@@ -140,7 +140,7 @@ func BenchmarkCompareEQString(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(sa, sb, eq)
 	}
 }
@@ -149,7 +149,7 @@ func BenchmarkCompareEQPString(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(&sa, &sb, eq)
 	}
 }
@@ -158,7 +158,7 @@ func BenchmarkCompareEQBytes(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(ba, bb, eq)
 	}
 }
@@ -167,7 +167,7 @@ func BenchmarkCompareEQNil(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(nil, nil, eq)
 	}
 }
@@ -176,7 +176,7 @@ func BenchmarkCompareNEInt(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(i64a, i64b, ne)
 	}
 }
@@ -185,7 +185,7 @@ func BenchmarkCompareNEFloat(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(f64a, f64b, ne)
 	}
 }
@@ -194,7 +194,7 @@ func BenchmarkCompareNEString(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(sa, sb, ne)
 	}
 }
@@ -203,7 +203,7 @@ func BenchmarkCompareLTFloat(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(f64a, f64b, lt)
 	}
 }
@@ -212,7 +212,7 @@ func BenchmarkCompareLTString(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(sa, sb, lt)
 	}
 }
@@ -221,7 +221,7 @@ func BenchmarkCompareLTNil(b *testing.B) {
 	testParser, _ := NewParser[any](nil, nil, componenttest.NewNopTelemetrySettings())
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		testParser.compare(nil, nil, lt)
 	}
 }
@@ -240,7 +240,7 @@ func compareEq(a any, b any, op compareOp) bool {
 }
 
 func BenchmarkCompareEQFunction(b *testing.B) {
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		compareEq(sa, sb, eq)
 	}
 }
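When the induction variable is unused, intrange additionally drops it, which is why every benchmark body in this patch becomes `for range b.N`. The runner still controls the iteration count through b.N exactly as before. A minimal sketch (illustration only, not part of the patch):

    package bench

    import (
    	"fmt"
    	"testing"
    )

    func BenchmarkSprintf(b *testing.B) {
    	b.ReportAllocs()
    	b.ResetTimer()
    	// the index was never read, so `for range b.N` replaces the counted loop
    	for range b.N {
    		_ = fmt.Sprintf("%d", 42)
    	}
    }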
diff --git a/pkg/ottl/contexts/internal/logging/logging.go b/pkg/ottl/contexts/internal/logging/logging.go
index 4837506d55d93..f06a77deb32b6 100644
--- a/pkg/ottl/contexts/internal/logging/logging.go
+++ b/pkg/ottl/contexts/internal/logging/logging.go
@@ -18,7 +18,7 @@ type Slice pcommon.Slice
 func (s Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	ss := pcommon.Slice(s)
 	var err error
-	for i := 0; i < ss.Len(); i++ {
+	for i := range ss.Len() {
 		v := ss.At(i)
 		switch v.Type() {
 		case pcommon.ValueTypeStr:
@@ -118,7 +118,7 @@ type SpanEventSlice ptrace.SpanEventSlice
 func (s SpanEventSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	ses := ptrace.SpanEventSlice(s)
 	var err error
-	for i := 0; i < ses.Len(); i++ {
+	for i := range ses.Len() {
 		err = errors.Join(err, encoder.AppendObject(SpanEvent(ses.At(i))))
 	}
 	return err
@@ -140,7 +140,7 @@ type SpanLinkSlice ptrace.SpanLinkSlice
 func (s SpanLinkSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	sls := ptrace.SpanLinkSlice(s)
 	var err error
-	for i := 0; i < sls.Len(); i++ {
+	for i := range sls.Len() {
 		err = errors.Join(err, encoder.AppendObject(SpanLink(sls.At(i))))
 	}
 	return err
@@ -196,7 +196,7 @@ type NumberDataPointSlice pmetric.NumberDataPointSlice
 func (n NumberDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	ndps := pmetric.NumberDataPointSlice(n)
 	var err error
-	for i := 0; i < ndps.Len(); i++ {
+	for i := range ndps.Len() {
 		err = errors.Join(err, encoder.AppendObject(NumberDataPoint(ndps.At(i))))
 	}
 	return err
@@ -226,7 +226,7 @@ type HistogramDataPointSlice pmetric.HistogramDataPointSlice
 func (h HistogramDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	hdps := pmetric.HistogramDataPointSlice(h)
 	var err error
-	for i := 0; i < hdps.Len(); i++ {
+	for i := range hdps.Len() {
 		err = errors.Join(err, encoder.AppendObject(HistogramDataPoint(hdps.At(i))))
 	}
 	return err
@@ -256,7 +256,7 @@ type ExponentialHistogramDataPointSlice pmetric.ExponentialHistogramDataPointSli
 func (e ExponentialHistogramDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	ehdps := pmetric.ExponentialHistogramDataPointSlice(e)
 	var err error
-	for i := 0; i < ehdps.Len(); i++ {
+	for i := range ehdps.Len() {
 		err = errors.Join(err, encoder.AppendObject(ExponentialHistogramDataPoint(ehdps.At(i))))
 	}
 	return err
@@ -297,7 +297,7 @@ type SummaryDataPointSlice pmetric.SummaryDataPointSlice
 func (s SummaryDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	sdps := pmetric.SummaryDataPointSlice(s)
 	var err error
-	for i := 0; i < sdps.Len(); i++ {
+	for i := range sdps.Len() {
 		err = errors.Join(err, encoder.AppendObject(SummaryDataPoint(sdps.At(i))))
 	}
 	return err
@@ -323,7 +323,7 @@ type SummaryDataPointValueAtQuantileSlice pmetric.SummaryDataPointValueAtQuantil
 func (s SummaryDataPointValueAtQuantileSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	qs := pmetric.SummaryDataPointValueAtQuantileSlice(s)
 	var err error
-	for i := 0; i < qs.Len(); i++ {
+	for i := range qs.Len() {
 		err = errors.Join(err, encoder.AppendObject(SummaryDataPointValueAtQuantile(qs.At(i))))
 	}
 	return nil
@@ -342,7 +342,7 @@ type UInt64Slice pcommon.UInt64Slice
 func (u UInt64Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	uis := pcommon.UInt64Slice(u)
-	for i := 0; i < uis.Len(); i++ {
+	for i := range uis.Len() {
 		encoder.AppendUint64(uis.At(i))
 	}
 	return nil
@@ -352,7 +352,7 @@ type Float64Slice pcommon.Float64Slice
 func (f Float64Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	fs := pcommon.Float64Slice(f)
-	for i := 0; i < fs.Len(); i++ {
+	for i := range fs.Len() {
 		encoder.AppendFloat64(fs.At(i))
 	}
 	return nil
@@ -363,7 +363,7 @@ type ExemplarSlice pmetric.ExemplarSlice
 func (e ExemplarSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
 	es := pmetric.ExemplarSlice(e)
 	var err error
-	for i := 0; i < es.Len(); i++ {
+	for i := range es.Len() {
 		ee := es.At(i)
 		err = errors.Join(err, encoder.AppendObject(Exemplar(ee)))
 	}
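Beyond the loop form, these marshalers share one idiom worth noting: they visit every element and fold per-element failures with errors.Join instead of returning on the first error. A self-contained sketch of that accumulation pattern, with encodeAll as a hypothetical stand-in for the MarshalLogArray methods above (not part of the patch):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // encodeAll visits every element and joins the failures into one error,
    // so a single bad element does not hide the rest.
    func encodeAll(items []string, encode func(string) error) error {
    	var err error
    	for i := range items {
    		err = errors.Join(err, encode(items[i]))
    	}
    	return err
    }

    func main() {
    	err := encodeAll([]string{"a", "", "c"}, func(s string) error {
    		if s == "" {
    			return errors.New("empty element")
    		}
    		return nil
    	})
    	fmt.Println(err)
    }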
diff --git a/pkg/ottl/contexts/internal/value.go b/pkg/ottl/contexts/internal/value.go
index 34136fe89caf9..54b3daf6eb64e 100644
--- a/pkg/ottl/contexts/internal/value.go
+++ b/pkg/ottl/contexts/internal/value.go
@@ -71,7 +71,7 @@ func SetValue(value pcommon.Value, val any) error {
 func getIndexableValue[K any](ctx context.Context, tCtx K, value pcommon.Value, keys []ottl.Key[K]) (any, error) {
 	val := value
 	var ok bool
-	for index := 0; index < len(keys); index++ {
+	for index := range keys {
 		switch val.Type() {
 		case pcommon.ValueTypeMap:
 			s, err := keys[index].String(ctx, tCtx)
@@ -125,7 +125,7 @@ func setIndexableValue[K any](ctx context.Context, tCtx K, currentValue pcommon.
 		return err
 	}
 
-	for index := 0; index < len(keys); index++ {
+	for index := range keys {
 		switch currentValue.Type() {
 		case pcommon.ValueTypeMap:
 			s, err := keys[index].String(ctx, tCtx)
@@ -175,7 +175,7 @@ func setIndexableValue[K any](ctx context.Context, tCtx K, currentValue pcommon.
 				currentValue = currentValue.SetEmptyMap().PutEmpty(*s)
 			case i != nil:
 				currentValue.SetEmptySlice()
-				for k := 0; k < int(*i); k++ {
+				for range *i {
 					currentValue.Slice().AppendEmpty()
 				}
 				currentValue = currentValue.Slice().AppendEmpty()
@@ -185,7 +185,7 @@ func setIndexableValue[K any](ctx context.Context, tCtx K, currentValue pcommon.
 			switch {
 			case errInt == nil:
 				currentValue.SetEmptySlice()
-				for k := 0; k < int(*resInt); k++ {
+				for range *resInt {
 					currentValue.Slice().AppendEmpty()
 				}
 				currentValue = currentValue.Slice().AppendEmpty()
diff --git a/pkg/ottl/e2e/e2e_test.go b/pkg/ottl/e2e/e2e_test.go
index b81a08df4dd7b..9f3ac822716e6 100644
--- a/pkg/ottl/e2e/e2e_test.go
+++ b/pkg/ottl/e2e/e2e_test.go
@@ -1491,7 +1491,7 @@ func Benchmark_XML_Functions(b *testing.B) {
 	actualCtx := tCtxWithTestBody()
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		_, _, _ = logStatements.Execute(context.Background(), actualCtx)
 	}
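The setIndexableValue hunks show a third shape: `for k := 0; k < int(*i); k++` collapses to `for range *i`. Range-over-int accepts any integer type, so both the unused loop variable and the int() conversion disappear. A minimal sketch (illustration only):

    package main

    import "fmt"

    func main() {
    	var n int64 = 3
    	out := make([]string, 0, n)
    	// count-only loop: no loop variable, and the bound may be any integer type
    	for range n {
    		out = append(out, "")
    	}
    	fmt.Println(len(out)) // 3
    }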
diff --git a/pkg/ottl/expression.go b/pkg/ottl/expression.go
index a2e8d29e63c4b..0a73dd6ba1a7a 100644
--- a/pkg/ottl/expression.go
+++ b/pkg/ottl/expression.go
@@ -379,7 +379,7 @@ func (g StandardFunctionGetter[K]) Get(args Arguments) (Expr[K], error) {
 	if fArgsVal.NumField() != argsVal.NumField() {
 		return Expr[K]{}, fmt.Errorf("incorrect number of arguments. Expected: %d Received: %d", fArgsVal.NumField(), argsVal.NumField())
 	}
-	for i := 0; i < fArgsVal.NumField(); i++ {
+	for i := range fArgsVal.NumField() {
 		field := argsVal.Field(i)
 		fArgsVal.Field(i).Set(field)
 	}
diff --git a/pkg/ottl/functions.go b/pkg/ottl/functions.go
index 3703be6a422ae..9dfa508914b75 100644
--- a/pkg/ottl/functions.go
+++ b/pkg/ottl/functions.go
@@ -340,7 +340,7 @@ func (p *Parser[K]) buildArgs(ed editor, argsVal reflect.Value) error {
 	requiredArgs := 0
 	seenNamed := false
 
-	for i := 0; i < len(ed.Arguments); i++ {
+	for i := range len(ed.Arguments) {
 		if !seenNamed && ed.Arguments[i].Name != "" {
 			seenNamed = true
 		} else if seenNamed && ed.Arguments[i].Name == "" {
@@ -348,7 +348,7 @@
 		}
 	}
 
-	for i := 0; i < argsVal.NumField(); i++ {
+	for i := range argsVal.NumField() {
 		if !strings.HasPrefix(argsVal.Field(i).Type().Name(), "Optional") {
 			requiredArgs++
 		}
@@ -659,7 +659,7 @@ func buildSlice[T any](argVal value, argType reflect.Type, buildArg buildArgFunc
 	vals := []T{}
 	values := argVal.List.Values
-	for j := 0; j < len(values); j++ {
+	for j := range values {
 		untypedVal, err := buildArg(values[j], argType.Elem())
 		if err != nil {
 			return nil, fmt.Errorf("error while parsing list argument at index %v: %w", j, err)
diff --git a/pkg/ottl/functions_test.go b/pkg/ottl/functions_test.go
index bb81c316dd272..684e06aa7478e 100644
--- a/pkg/ottl/functions_test.go
+++ b/pkg/ottl/functions_test.go
@@ -2009,7 +2009,7 @@ func createFactory[A any](name string, args A, fn any) Factory[any] {
 	argsVal := reflect.ValueOf(fArgs).Elem()
 	fnArgs := make([]reflect.Value, argsVal.NumField())
 
-	for i := 0; i < argsVal.NumField(); i++ {
+	for i := range argsVal.NumField() {
 		fnArgs[i] = argsVal.Field(i)
 	}
diff --git a/pkg/ottl/ottlfuncs/func_flatten.go b/pkg/ottl/ottlfuncs/func_flatten.go
index b651cce4ce5f4..fd5a8793aad4d 100644
--- a/pkg/ottl/ottlfuncs/func_flatten.go
+++ b/pkg/ottl/ottlfuncs/func_flatten.go
@@ -71,7 +71,7 @@ func flattenMap(m pcommon.Map, result pcommon.Map, prefix string, currentDepth,
 }
 
 func flattenSlice(s pcommon.Slice, result pcommon.Map, prefix string, currentDepth int64, maxDepth int64) {
-	for i := 0; i < s.Len(); i++ {
+	for i := range s.Len() {
 		flattenValue(fmt.Sprintf("%d", i), s.At(i), currentDepth+1, maxDepth, result, prefix)
 	}
 }
@@ -81,7 +81,7 @@ func flattenValue(k string, v pcommon.Value, currentDepth int64, maxDepth int64,
 	case v.Type() == pcommon.ValueTypeMap && currentDepth < maxDepth:
 		flattenMap(v.Map(), result, prefix+k, currentDepth+1, maxDepth)
 	case v.Type() == pcommon.ValueTypeSlice && currentDepth < maxDepth:
-		for i := 0; i < v.Slice().Len(); i++ {
+		for i := range v.Slice().Len() {
 			switch {
 			case v.Slice().At(i).Type() == pcommon.ValueTypeMap && currentDepth+1 < maxDepth:
 				flattenMap(v.Slice().At(i).Map(), result, fmt.Sprintf("%v.%v", prefix+k, i), currentDepth+2, maxDepth)
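Note that in buildArgs above the linter's literal rewrite is `for i := range len(ed.Arguments)`, a valid form even though ranging the slice directly would also work; for non-slice bounds like reflect's NumField there is no slice to range, so the int expression itself is the bound. A minimal sketch of the reflect case (illustration only):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type args struct {
    	Target  string
    	Pattern string
    }

    func main() {
    	v := reflect.ValueOf(args{"t", "p"})
    	// range over a method call result: evaluated once, i = 0..NumField()-1
    	for i := range v.NumField() {
    		fmt.Println(v.Type().Field(i).Name, v.Field(i).Interface())
    	}
    }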
plog.NewScopeLogsSlice() - for i := 0; i < 5; i++ { + for range 5 { plogScopeLogsSlice.AppendEmpty() } plogScopeLogsSlice.EnsureCapacity(5) pmetricExemplarSlice := pmetric.NewExemplarSlice() pmetricExemplarSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricExemplarSlice.AppendEmpty() } pmetricExponentialHistogramDataPointSlice := pmetric.NewExponentialHistogramDataPointSlice() pmetricExponentialHistogramDataPointSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricExponentialHistogramDataPointSlice.AppendEmpty() } pmetricHistogramDataPointSlice := pmetric.NewHistogramDataPointSlice() pmetricHistogramDataPointSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricHistogramDataPointSlice.AppendEmpty() } pmetricMetricSlice := pmetric.NewMetricSlice() pmetricMetricSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricMetricSlice.AppendEmpty() } pmetricNumberDataPointSlice := pmetric.NewNumberDataPointSlice() pmetricNumberDataPointSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricNumberDataPointSlice.AppendEmpty() } pmetricResourceSlice := pmetric.NewResourceMetricsSlice() pmetricResourceSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricResourceSlice.AppendEmpty() } pmetricScopeMetricsSlice := pmetric.NewScopeMetricsSlice() pmetricScopeMetricsSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricScopeMetricsSlice.AppendEmpty() } pmetricSummaryDataPointSlice := pmetric.NewSummaryDataPointSlice() pmetricSummaryDataPointSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricSummaryDataPointSlice.AppendEmpty() } pmetricSummaryDataPointValueAtQuantileSlice := pmetric.NewSummaryDataPointValueAtQuantileSlice() pmetricSummaryDataPointValueAtQuantileSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { pmetricSummaryDataPointValueAtQuantileSlice.AppendEmpty() } ptraceResourceSpansSlice := ptrace.NewResourceSpansSlice() ptraceResourceSpansSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { ptraceResourceSpansSlice.AppendEmpty() } ptraceScopeSpansSlice := ptrace.NewScopeSpansSlice() ptraceScopeSpansSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { ptraceScopeSpansSlice.AppendEmpty() } ptraceSpanEventSlice := ptrace.NewSpanEventSlice() ptraceSpanEventSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { ptraceSpanEventSlice.AppendEmpty() } ptraceSpanLinkSlice := ptrace.NewSpanLinkSlice() ptraceSpanLinkSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { ptraceSpanLinkSlice.AppendEmpty() } ptraceSpanSlice := ptrace.NewSpanSlice() ptraceSpanSlice.EnsureCapacity(5) - for i := 0; i < 5; i++ { + for range 5 { ptraceSpanSlice.AppendEmpty() } @@ -297,7 +297,7 @@ func Test_Len(t *testing.T) { func dummyMap(size int) map[string]any { m := make(map[string]any, size) - for i := 0; i < size; i++ { + for i := range size { m[strconv.Itoa(i)] = i } return m diff --git a/pkg/ottl/ottlfuncs/func_parse_json_test.go b/pkg/ottl/ottlfuncs/func_parse_json_test.go index 4ef3f8fb49c2a..33fa4781c387a 100644 --- a/pkg/ottl/ottlfuncs/func_parse_json_test.go +++ b/pkg/ottl/ottlfuncs/func_parse_json_test.go @@ -250,7 +250,7 @@ func BenchmarkParseJSON(b *testing.B) { ctx := context.Background() b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { _, err := parseJSON(ottl.StandardStringGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { return benchData, nil 
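The `for range 5` blocks above all follow the same fixed-size build pattern: reserve capacity once, then append empty elements in a count-only loop. A minimal sketch, assuming the pdata module (go.opentelemetry.io/collector/pdata) is on the module path (illustration only, not part of the patch):

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/pdata/plog"
    )

    func main() {
    	lrs := plog.NewLogRecordSlice()
    	lrs.EnsureCapacity(5) // one allocation up front
    	for range 5 {         // no index needed, just a count
    		lrs.AppendEmpty()
    	}
    	fmt.Println(lrs.Len()) // 5
    }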
diff --git a/pkg/ottl/ottlfuncs/func_sort.go b/pkg/ottl/ottlfuncs/func_sort.go
index 4c9f56c820cee..ff73421fb68fb 100644
--- a/pkg/ottl/ottlfuncs/func_sort.go
+++ b/pkg/ottl/ottlfuncs/func_sort.go
@@ -168,7 +168,7 @@ func findCommonValueType(slice pcommon.Slice) (pcommon.ValueType, bool) {
 	wantStr := false
 	wantDouble := false
 
-	for i := 0; i < length; i++ {
+	for i := range length {
 		value := slice.At(i)
 		currType := value.Type()
@@ -226,7 +226,7 @@ type convertedValue[T targetType] struct {
 func makeConvertedCopy[T targetType](slice pcommon.Slice, converter func(idx int) T) []convertedValue[T] {
 	length := slice.Len()
 	var out []convertedValue[T]
-	for i := 0; i < length; i++ {
+	for i := range length {
 		cv := convertedValue[T]{
 			value:         converter(i),
 			originalValue: slice.At(i).AsRaw(),
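makeConvertedCopy above also shows that a locally captured bound (`length := slice.Len()`) ranges just as cleanly. A loose, self-contained sketch of that converted-copy idea with invented names (not the library's API): snapshot each element through a converter while remembering its original value.

    package main

    import "fmt"

    type converted[T any] struct {
    	value    T
    	original any
    }

    func convertAll[T any](n int, conv func(i int) T, raw func(i int) any) []converted[T] {
    	out := make([]converted[T], 0, n)
    	for i := range n {
    		out = append(out, converted[T]{value: conv(i), original: raw(i)})
    	}
    	return out
    }

    func main() {
    	src := []any{1, 2, 3}
    	cs := convertAll(len(src), func(i int) float64 { return float64(src[i].(int)) }, func(i int) any { return src[i] })
    	fmt.Println(cs)
    }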
diff --git a/pkg/pdatatest/plogtest/logs.go b/pkg/pdatatest/plogtest/logs.go
index 1f1df88ef58b0..a23cf18931b4b 100644
--- a/pkg/pdatatest/plogtest/logs.go
+++ b/pkg/pdatatest/plogtest/logs.go
@@ -37,10 +37,10 @@ func CompareLogs(expected, actual plog.Logs, options ...CompareLogsOption) error
 	var errs error
 	var outOfOrderErrs error
-	for e := 0; e < numResources; e++ {
+	for e := range numResources {
 		er := expectedLogs.At(e)
 		var foundMatch bool
-		for a := 0; a < numResources; a++ {
+		for a := range numResources {
 			ar := actualLogs.At(a)
 			if _, ok := matchingResources[ar]; ok {
 				continue
@@ -61,7 +61,7 @@ func CompareLogs(expected, actual plog.Logs, options ...CompareLogsOption) error
 		}
 	}
 
-	for i := 0; i < numResources; i++ {
+	for i := range numResources {
 		if _, ok := matchingResources[actualLogs.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected resource: %v", actualLogs.At(i).Resource().Attributes().AsRaw()))
 		}
@@ -105,10 +105,10 @@ func CompareResourceLogs(expected, actual plog.ResourceLogs) error {
 	matchingScopeLogs := make(map[plog.ScopeLogs]plog.ScopeLogs, numScopeLogs)
 	var outOfOrderErrs error
-	for e := 0; e < numScopeLogs; e++ {
+	for e := range numScopeLogs {
 		esl := expected.ScopeLogs().At(e)
 		var foundMatch bool
-		for a := 0; a < numScopeLogs; a++ {
+		for a := range numScopeLogs {
 			asl := actual.ScopeLogs().At(a)
 			if _, ok := matchingScopeLogs[asl]; ok {
 				continue
@@ -129,7 +129,7 @@ func CompareResourceLogs(expected, actual plog.ResourceLogs) error {
 		}
 	}
 
-	for i := 0; i < numScopeLogs; i++ {
+	for i := range numScopeLogs {
 		if _, ok := matchingScopeLogs[actual.ScopeLogs().At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected scope: %s", actual.ScopeLogs().At(i).Scope().Name()))
 		}
@@ -142,7 +142,7 @@ func CompareResourceLogs(expected, actual plog.ResourceLogs) error {
 		return outOfOrderErrs
 	}
 
-	for i := 0; i < esls.Len(); i++ {
+	for i := range esls.Len() {
 		errPrefix := fmt.Sprintf(`scope "%s"`, esls.At(i).Scope().Name())
 		errs = multierr.Append(errs, internal.AddErrPrefix(errPrefix, CompareScopeLogs(esls.At(i), asls.At(i))))
 	}
@@ -170,10 +170,10 @@ func CompareScopeLogs(expected, actual plog.ScopeLogs) error {
 	matchingLogRecords := make(map[plog.LogRecord]plog.LogRecord, numLogRecords)
 	var outOfOrderErrs error
-	for e := 0; e < numLogRecords; e++ {
+	for e := range numLogRecords {
 		elr := expected.LogRecords().At(e)
 		var foundMatch bool
-		for a := 0; a < numLogRecords; a++ {
+		for a := range numLogRecords {
 			alr := actual.LogRecords().At(a)
 			if _, ok := matchingLogRecords[alr]; ok {
 				continue
@@ -194,7 +194,7 @@ func CompareScopeLogs(expected, actual plog.ScopeLogs) error {
 		}
 	}
 
-	for i := 0; i < numLogRecords; i++ {
+	for i := range numLogRecords {
 		if _, ok := matchingLogRecords[actual.LogRecords().At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected log record: %v", actual.LogRecords().At(i).Attributes().AsRaw()))
diff --git a/pkg/pdatatest/plogtest/options.go b/pkg/pdatatest/plogtest/options.go
index 632b76297c992..83748f0bb024e 100644
--- a/pkg/pdatatest/plogtest/options.go
+++ b/pkg/pdatatest/plogtest/options.go
@@ -44,7 +44,7 @@ func (opt ignoreResourceAttributeValue) applyOnLogs(expected, actual plog.Logs)
 func (opt ignoreResourceAttributeValue) maskLogsResourceAttributeValue(logs plog.Logs) {
 	rls := logs.ResourceLogs()
-	for i := 0; i < rls.Len(); i++ {
+	for i := range rls.Len() {
 		internal.MaskResourceAttributeValue(rls.At(i).Resource(), opt.attributeName)
 	}
 }
@@ -68,11 +68,11 @@ func (opt ignoreLogRecordAttributeValue) applyOnLogs(expected, actual plog.Logs)
 func (opt ignoreLogRecordAttributeValue) maskLogRecordAttributeValue(logs plog.Logs) {
 	rls := logs.ResourceLogs()
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
+	for i := range logs.ResourceLogs().Len() {
 		sls := rls.At(i).ScopeLogs()
-		for j := 0; j < sls.Len(); j++ {
+		for j := range sls.Len() {
 			lrs := sls.At(j).LogRecords()
-			for k := 0; k < lrs.Len(); k++ {
+			for k := range lrs.Len() {
 				lr := lrs.At(k)
 				val, exists := lr.Attributes().Get(opt.attributeName)
 				if exists {
@@ -93,11 +93,11 @@ func IgnoreTimestamp() CompareLogsOption {
 func maskTimestamp(logs plog.Logs, ts pcommon.Timestamp) {
 	rls := logs.ResourceLogs()
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
+	for i := range logs.ResourceLogs().Len() {
 		sls := rls.At(i).ScopeLogs()
-		for j := 0; j < sls.Len(); j++ {
+		for j := range sls.Len() {
 			lrs := sls.At(j).LogRecords()
-			for k := 0; k < lrs.Len(); k++ {
+			for k := range lrs.Len() {
 				lrs.At(k).SetTimestamp(ts)
 			}
 		}
@@ -114,11 +114,11 @@ func IgnoreObservedTimestamp() CompareLogsOption {
 func maskObservedTimestamp(logs plog.Logs, ts pcommon.Timestamp) {
 	rls := logs.ResourceLogs()
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
+	for i := range logs.ResourceLogs().Len() {
 		sls := rls.At(i).ScopeLogs()
-		for j := 0; j < sls.Len(); j++ {
+		for j := range sls.Len() {
 			lrs := sls.At(j).LogRecords()
-			for k := 0; k < lrs.Len(); k++ {
+			for k := range lrs.Len() {
 				lrs.At(k).SetObservedTimestamp(ts)
 			}
 		}
@@ -153,7 +153,7 @@ func IgnoreScopeLogsOrder() CompareLogsOption {
 }
 
 func sortScopeLogsSlices(ls plog.Logs) {
-	for i := 0; i < ls.ResourceLogs().Len(); i++ {
+	for i := range ls.ResourceLogs().Len() {
 		ls.ResourceLogs().At(i).ScopeLogs().Sort(func(a, b plog.ScopeLogs) bool {
 			if a.SchemaUrl() != b.SchemaUrl() {
 				return a.SchemaUrl() < b.SchemaUrl()
@@ -175,8 +175,8 @@ func IgnoreLogRecordsOrder() CompareLogsOption {
 }
 
 func sortLogRecordSlices(ls plog.Logs) {
-	for i := 0; i < ls.ResourceLogs().Len(); i++ {
-		for j := 0; j < ls.ResourceLogs().At(i).ScopeLogs().Len(); j++ {
+	for i := range ls.ResourceLogs().Len() {
+		for j := range ls.ResourceLogs().At(i).ScopeLogs().Len() {
 			ls.ResourceLogs().At(i).ScopeLogs().At(j).LogRecords().Sort(func(a, b plog.LogRecord) bool {
 				if a.ObservedTimestamp() != b.ObservedTimestamp() {
 					return a.ObservedTimestamp() < b.ObservedTimestamp()
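The CompareLogs/CompareScopeLogs hunks above (and the metrics and profiles comparators below) all share one algorithm, which is why the same `for e := range n` / `for a := range n` pair recurs: each expected element claims the first unclaimed actual element it matches, and leftovers on either side are reported. A loose, self-contained sketch with invented names (not the pdatatest API):

    package main

    import "fmt"

    func matchSets(expected, actual []string) (missing, unexpected []string) {
    	claimed := make(map[int]bool, len(actual))
    	for e := range expected {
    		found := false
    		for a := range actual {
    			if claimed[a] || actual[a] != expected[e] {
    				continue
    			}
    			claimed[a] = true // each actual element may be claimed once
    			found = true
    			break
    		}
    		if !found {
    			missing = append(missing, expected[e])
    		}
    	}
    	for a := range actual {
    		if !claimed[a] {
    			unexpected = append(unexpected, actual[a])
    		}
    	}
    	return missing, unexpected
    }

    func main() {
    	fmt.Println(matchSets([]string{"a", "b"}, []string{"b", "c"}))
    }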
diff --git a/pkg/pdatatest/pmetrictest/metrics.go b/pkg/pdatatest/pmetrictest/metrics.go
index 48a3cc971189e..a843c236f84e5 100644
--- a/pkg/pdatatest/pmetrictest/metrics.go
+++ b/pkg/pdatatest/pmetrictest/metrics.go
@@ -36,10 +36,10 @@ func CompareMetrics(expected, actual pmetric.Metrics, options ...CompareMetricsO
 	var errs error
 	var outOfOrderErrs error
-	for e := 0; e < numResources; e++ {
+	for e := range numResources {
 		er := expectedMetrics.At(e)
 		var foundMatch bool
-		for a := 0; a < numResources; a++ {
+		for a := range numResources {
 			ar := actualMetrics.At(a)
 			if _, ok := matchingResources[ar]; ok {
 				continue
@@ -61,7 +61,7 @@ func CompareMetrics(expected, actual pmetric.Metrics, options ...CompareMetricsO
 		}
 	}
 
-	for i := 0; i < numResources; i++ {
+	for i := range numResources {
 		if _, ok := matchingResources[actualMetrics.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected resource: %v", actualMetrics.At(i).Resource().Attributes().AsRaw()))
@@ -104,10 +104,10 @@ func CompareResourceMetrics(expected, actual pmetric.ResourceMetrics) error {
 	matchingResources := make(map[pmetric.ScopeMetrics]pmetric.ScopeMetrics, numScopeMetrics)
 	var outOfOrderErrs error
-	for e := 0; e < numScopeMetrics; e++ {
+	for e := range numScopeMetrics {
 		esm := esms.At(e)
 		var foundMatch bool
-		for a := 0; a < numScopeMetrics; a++ {
+		for a := range numScopeMetrics {
 			asm := asms.At(a)
 			if _, ok := matchingResources[asm]; ok {
 				continue
@@ -129,7 +129,7 @@ func CompareResourceMetrics(expected, actual pmetric.ResourceMetrics) error {
 		}
 	}
 
-	for i := 0; i < numScopeMetrics; i++ {
+	for i := range numScopeMetrics {
 		if _, ok := matchingResources[actual.ScopeMetrics().At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected scope: %s", actual.ScopeMetrics().At(i).Scope().Name()))
@@ -143,7 +143,7 @@ func CompareResourceMetrics(expected, actual pmetric.ResourceMetrics) error {
 		return outOfOrderErrs
 	}
 
-	for i := 0; i < esms.Len(); i++ {
+	for i := range esms.Len() {
 		errPrefix := fmt.Sprintf(`scope "%s"`, esms.At(i).Scope().Name())
 		errs = multierr.Append(errs, internal.AddErrPrefix(errPrefix, CompareScopeMetrics(esms.At(i), asms.At(i))))
 	}
@@ -175,10 +175,10 @@ func CompareScopeMetrics(expected, actual pmetric.ScopeMetrics) error {
 	matchingMetrics := make(map[pmetric.Metric]pmetric.Metric, numMetrics)
 	var outOfOrderErrs error
-	for e := 0; e < numMetrics; e++ {
+	for e := range numMetrics {
 		em := ems.At(e)
 		var foundMatch bool
-		for a := 0; a < numMetrics; a++ {
+		for a := range numMetrics {
 			am := ams.At(a)
 			if _, ok := matchingMetrics[am]; ok {
 				continue
@@ -199,7 +199,7 @@ func CompareScopeMetrics(expected, actual pmetric.ScopeMetrics) error {
 		}
 	}
 
-	for i := 0; i < numMetrics; i++ {
+	for i := range numMetrics {
 		if _, ok := matchingMetrics[ams.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected metric: %s", ams.At(i).Name()))
 		}
@@ -212,7 +212,7 @@ func CompareScopeMetrics(expected, actual pmetric.ScopeMetrics) error {
 		return outOfOrderErrs
 	}
 
-	for i := 0; i < numMetrics; i++ {
+	for i := range numMetrics {
 		errPrefix := fmt.Sprintf(`metric "%s"`, ems.At(i).Name())
 		errs = multierr.Append(errs, internal.AddErrPrefix(errPrefix, CompareMetric(ems.At(i), ams.At(i))))
 	}
@@ -293,10 +293,10 @@ func compareNumberDataPointSlices(expected, actual pmetric.NumberDataPointSlice)
 	var errs error
 	var outOfOrderErrs error
-	for e := 0; e < numPoints; e++ {
+	for e := range numPoints {
 		edp := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numPoints; a++ {
+		for a := range numPoints {
 			adp := actual.At(a)
 			if _, ok := matchingDPS[adp]; ok {
 				continue
@@ -317,7 +317,7 @@ func compareNumberDataPointSlices(expected, actual pmetric.NumberDataPointSlice)
 		}
 	}
 
-	for i := 0; i < numPoints; i++ {
+	for i := range numPoints {
 		if _, ok := matchingDPS[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected datapoint: %v", actual.At(i).Attributes().AsRaw()))
 		}
@@ -385,10 +385,10 @@ func compareExemplarSlice(expected, actual pmetric.ExemplarSlice) error {
 	var errs error
 	var outOfOrderErrs error
-	for e := 0; e < numExemplars; e++ {
+	for e := range numExemplars {
 		eex := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numExemplars; a++ {
+		for a := range numExemplars {
 			aex := actual.At(a)
 			if _, ok := matchingExs[aex]; ok {
 				continue
@@ -409,7 +409,7 @@ func compareExemplarSlice(expected, actual pmetric.ExemplarSlice) error {
 		}
 	}
 
-	for i := 0; i < numExemplars; i++ {
+	for i := range numExemplars {
 		if _, ok := matchingExs[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected exemplar: %v", actual.At(i).FilteredAttributes().AsRaw()))
@@ -476,10 +476,10 @@ func compareHistogramDataPointSlices(expected, actual pmetric.HistogramDataPoint
 	var errs error
 	var outOfOrderErrs error
-	for e := 0; e < numPoints; e++ {
+	for e := range numPoints {
 		edp := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numPoints; a++ {
+		for a := range numPoints {
 			adp := actual.At(a)
 			if _, ok := matchingDPS[adp]; ok {
 				continue
@@ -500,7 +500,7 @@ func compareHistogramDataPointSlices(expected, actual pmetric.HistogramDataPoint
 		}
 	}
 
-	for i := 0; i < numPoints; i++ {
+	for i := range numPoints {
 		if _, ok := matchingDPS[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected datapoint: %v", actual.At(i).Attributes().AsRaw()))
 		}
@@ -611,10 +611,10 @@ func compareExponentialHistogramDataPointSlice(expected, actual pmetric.Exponent
 	var errs error
 	var outOfOrderErrs error
-	for e := 0; e < numPoints; e++ {
+	for e := range numPoints {
 		edp := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numPoints; a++ {
+		for a := range numPoints {
 			adp := actual.At(a)
 			if _, ok := matchingDPS[adp]; ok {
 				continue
@@ -635,7 +635,7 @@ func compareExponentialHistogramDataPointSlice(expected, actual pmetric.Exponent
 		}
 	}
 
-	for i := 0; i < numPoints; i++ {
+	for i := range numPoints {
 		if _, ok := matchingDPS[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected datapoint: %v", actual.At(i).Attributes().AsRaw()))
 		}
@@ -759,10 +759,10 @@ func compareSummaryDataPointSlices(expected, actual pmetric.SummaryDataPointSlic
 	matchingDPS := map[pmetric.SummaryDataPoint]pmetric.SummaryDataPoint{}
 	var errs error
 	var outOfOrderErrs error
-	for e := 0; e < numPoints; e++ {
+	for e := range numPoints {
 		edp := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numPoints; a++ {
+		for a := range numPoints {
 			adp := actual.At(a)
 			if _, ok := matchingDPS[adp]; ok {
 				continue
@@ -783,7 +783,7 @@ func compareSummaryDataPointSlices(expected, actual pmetric.SummaryDataPointSlic
 		}
 	}
 
-	for i := 0; i < numPoints; i++ {
+	for i := range numPoints {
 		if _, ok := matchingDPS[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected datapoint: %v", actual.At(i).Attributes().AsRaw()))
 		}
@@ -834,7 +834,7 @@ func CompareSummaryDataPoint(expected, actual pmetric.SummaryDataPoint) error {
 		return errs
 	}
 
-	for i := 0; i < expected.QuantileValues().Len(); i++ {
+	for i := range expected.QuantileValues().Len() {
 		eqv, acv := expected.QuantileValues().At(i), actual.QuantileValues().At(i)
 		if eqv.Quantile() != acv.Quantile() {
 			errs = multierr.Append(errs, fmt.Errorf("quantile doesn't match expected: %f, "+
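Throughout these comparators, mismatches are accumulated with multierr rather than returned one at a time, so a single CompareMetrics call can report every divergence at once. A minimal sketch of that accumulation style, assuming go.uber.org/multierr is on the module path (illustration only):

    package main

    import (
    	"fmt"

    	"go.uber.org/multierr"
    )

    func compareAll(pairs [][2]float64) error {
    	var errs error
    	for i := range pairs {
    		if pairs[i][0] != pairs[i][1] {
    			// collect every mismatch instead of failing fast
    			errs = multierr.Append(errs, fmt.Errorf("index %d: expected %v, got %v", i, pairs[i][0], pairs[i][1]))
    		}
    	}
    	return errs
    }

    func main() {
    	fmt.Println(compareAll([][2]float64{{1, 1}, {2, 3}, {4, 5}}))
    }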
diff --git a/pkg/pdatatest/pmetrictest/options.go b/pkg/pdatatest/pmetrictest/options.go
index 3a6ce4cb96518..1a6aab95cffea 100644
--- a/pkg/pdatatest/pmetrictest/options.go
+++ b/pkg/pdatatest/pmetrictest/options.go
@@ -38,9 +38,9 @@ func IgnoreMetricValues(metricNames ...string) CompareMetricsOption {
 func maskMetricValues(metrics pmetric.Metrics, metricNames ...string) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		ilms := rms.At(i).ScopeMetrics()
-		for j := 0; j < ilms.Len(); j++ {
+		for j := range ilms.Len() {
 			maskMetricSliceValues(ilms.At(j).Metrics(), metricNames...)
 		}
 	}
@@ -52,7 +52,7 @@ func maskMetricSliceValues(metrics pmetric.MetricSlice, metricNames ...string) {
 	for _, metricName := range metricNames {
 		metricNameSet[metricName] = true
 	}
-	for i := 0; i < metrics.Len(); i++ {
+	for i := range metrics.Len() {
 		if len(metricNames) == 0 || metricNameSet[metrics.At(i).Name()] {
 			switch metrics.At(i).Type() {
 			case pmetric.MetricTypeEmpty, pmetric.MetricTypeSum, pmetric.MetricTypeGauge:
@@ -86,7 +86,7 @@ func getDataPointSlice(metric pmetric.Metric) pmetric.NumberDataPointSlice {
 // maskDataPointSliceValues sets all data point values to zero.
 func maskDataPointSliceValues(dataPoints pmetric.NumberDataPointSlice) {
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		dataPoint := dataPoints.At(i)
 		dataPoint.SetIntValue(0)
 		dataPoint.SetDoubleValue(0)
@@ -95,7 +95,7 @@
 // maskHistogramDataPointSliceValues sets all data point values to zero.
 func maskHistogramDataPointSliceValues(dataPoints pmetric.HistogramDataPointSlice) {
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		dataPoint := dataPoints.At(i)
 		dataPoint.SetCount(0)
 		dataPoint.SetSum(0)
@@ -119,9 +119,9 @@ func IgnoreMetricFloatPrecision(precision int, metricNames ...string) CompareMet
 func floatMetricValues(precision int, metrics pmetric.Metrics, metricNames ...string) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		ilms := rms.At(i).ScopeMetrics()
-		for j := 0; j < ilms.Len(); j++ {
+		for j := range ilms.Len() {
 			floatMetricSliceValues(precision, ilms.At(j).Metrics(), metricNames...)
 		}
 	}
@@ -133,7 +133,7 @@ func floatMetricSliceValues(precision int, metrics pmetric.MetricSlice, metricNa
 	for _, metricName := range metricNames {
 		metricNameSet[metricName] = true
 	}
-	for i := 0; i < metrics.Len(); i++ {
+	for i := range metrics.Len() {
 		if len(metricNames) == 0 || metricNameSet[metrics.At(i).Name()] {
 			switch metrics.At(i).Type() {
 			case pmetric.MetricTypeEmpty, pmetric.MetricTypeSum, pmetric.MetricTypeGauge:
@@ -147,7 +147,7 @@
 // maskDataPointSliceValues rounds all data point values at a given decimal.
 func roundDataPointSliceValues(dataPoints pmetric.NumberDataPointSlice, precision int) {
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		dataPoint := dataPoints.At(i)
 		factor := math.Pow(10, float64(precision))
 		switch {
@@ -170,30 +170,30 @@ func IgnoreTimestamp() CompareMetricsOption {
 func maskTimestamp(metrics pmetric.Metrics, ts pcommon.Timestamp) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
-		for j := 0; j < rms.At(i).ScopeMetrics().Len(); j++ {
-			for k := 0; k < rms.At(i).ScopeMetrics().At(j).Metrics().Len(); k++ {
+	for i := range rms.Len() {
+		for j := range rms.At(i).ScopeMetrics().Len() {
+			for k := range rms.At(i).ScopeMetrics().At(j).Metrics().Len() {
 				m := rms.At(i).ScopeMetrics().At(j).Metrics().At(k)
 				//exhaustive:enforce
 				switch m.Type() {
 				case pmetric.MetricTypeGauge:
-					for l := 0; l < m.Gauge().DataPoints().Len(); l++ {
+					for l := range m.Gauge().DataPoints().Len() {
 						m.Gauge().DataPoints().At(l).SetTimestamp(ts)
 					}
 				case pmetric.MetricTypeSum:
-					for l := 0; l < m.Sum().DataPoints().Len(); l++ {
+					for l := range m.Sum().DataPoints().Len() {
 						m.Sum().DataPoints().At(l).SetTimestamp(ts)
 					}
 				case pmetric.MetricTypeHistogram:
-					for l := 0; l < m.Histogram().DataPoints().Len(); l++ {
+					for l := range m.Histogram().DataPoints().Len() {
 						m.Histogram().DataPoints().At(l).SetTimestamp(ts)
 					}
 				case pmetric.MetricTypeExponentialHistogram:
-					for l := 0; l < m.ExponentialHistogram().DataPoints().Len(); l++ {
+					for l := range m.ExponentialHistogram().DataPoints().Len() {
 						m.ExponentialHistogram().DataPoints().At(l).SetTimestamp(ts)
 					}
 				case pmetric.MetricTypeSummary:
-					for l := 0; l < m.Summary().DataPoints().Len(); l++ {
+					for l := range m.Summary().DataPoints().Len() {
 						m.Summary().DataPoints().At(l).SetTimestamp(ts)
 					}
 				case pmetric.MetricTypeEmpty:
@@ -214,30 +214,30 @@ func IgnoreStartTimestamp() CompareMetricsOption {
 func maskStartTimestamp(metrics pmetric.Metrics, ts pcommon.Timestamp) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
-		for j := 0; j < rms.At(i).ScopeMetrics().Len(); j++ {
-			for k := 0; k < rms.At(i).ScopeMetrics().At(j).Metrics().Len(); k++ {
+	for i := range rms.Len() {
+		for j := range rms.At(i).ScopeMetrics().Len() {
+			for k := range rms.At(i).ScopeMetrics().At(j).Metrics().Len() {
 				m := rms.At(i).ScopeMetrics().At(j).Metrics().At(k)
 				//exhaustive:enforce
 				switch m.Type() {
 				case pmetric.MetricTypeGauge:
-					for l := 0; l < m.Gauge().DataPoints().Len(); l++ {
+					for l := range m.Gauge().DataPoints().Len() {
 						m.Gauge().DataPoints().At(l).SetStartTimestamp(ts)
 					}
 				case pmetric.MetricTypeSum:
-					for l := 0; l < m.Sum().DataPoints().Len(); l++ {
+					for l := range m.Sum().DataPoints().Len() {
 						m.Sum().DataPoints().At(l).SetStartTimestamp(ts)
 					}
 				case pmetric.MetricTypeHistogram:
-					for l := 0; l < m.Histogram().DataPoints().Len(); l++ {
+					for l := range m.Histogram().DataPoints().Len() {
 						m.Histogram().DataPoints().At(l).SetStartTimestamp(ts)
 					}
 				case pmetric.MetricTypeExponentialHistogram:
-					for l := 0; l < m.ExponentialHistogram().DataPoints().Len(); l++ {
+					for l := range m.ExponentialHistogram().DataPoints().Len() {
 						m.ExponentialHistogram().DataPoints().At(l).SetStartTimestamp(ts)
 					}
 				case pmetric.MetricTypeSummary:
-					for l := 0; l < m.Summary().DataPoints().Len(); l++ {
+					for l := range m.Summary().DataPoints().Len() {
 						m.Summary().DataPoints().At(l).SetStartTimestamp(ts)
 					}
 				case pmetric.MetricTypeEmpty:
@@ -265,35 +265,35 @@ func IgnoreDatapointAttributesOrder() CompareMetricsOption {
 func orderDatapointAttributes(metrics pmetric.Metrics) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		ilms := rms.At(i).ScopeMetrics()
-		for j := 0; j < ilms.Len(); j++ {
+		for j := range ilms.Len() {
 			msl := ilms.At(j).Metrics()
-			for g := 0; g < msl.Len(); g++ {
+			for g := range msl.Len() {
 				msl.At(g)
 				switch msl.At(g).Type() {
 				case pmetric.MetricTypeGauge:
-					for k := 0; k < msl.At(g).Gauge().DataPoints().Len(); k++ {
+					for k := range msl.At(g).Gauge().DataPoints().Len() {
 						rawOrdered := internal.OrderMapByKey(msl.At(g).Gauge().DataPoints().At(k).Attributes().AsRaw())
 						_ = msl.At(g).Gauge().DataPoints().At(k).Attributes().FromRaw(rawOrdered)
 					}
 				case pmetric.MetricTypeSum:
-					for k := 0; k < msl.At(g).Sum().DataPoints().Len(); k++ {
+					for k := range msl.At(g).Sum().DataPoints().Len() {
 						rawOrdered := internal.OrderMapByKey(msl.At(g).Sum().DataPoints().At(k).Attributes().AsRaw())
 						_ = msl.At(g).Sum().DataPoints().At(k).Attributes().FromRaw(rawOrdered)
 					}
 				case pmetric.MetricTypeHistogram:
-					for k := 0; k < msl.At(g).Histogram().DataPoints().Len(); k++ {
+					for k := range msl.At(g).Histogram().DataPoints().Len() {
 						rawOrdered := internal.OrderMapByKey(msl.At(g).Histogram().DataPoints().At(k).Attributes().AsRaw())
 						_ = msl.At(g).Histogram().DataPoints().At(k).Attributes().FromRaw(rawOrdered)
 					}
 				case pmetric.MetricTypeExponentialHistogram:
-					for k := 0; k < msl.At(g).ExponentialHistogram().DataPoints().Len(); k++ {
+					for k := range msl.At(g).ExponentialHistogram().DataPoints().Len() {
 						rawOrdered := internal.OrderMapByKey(msl.At(g).ExponentialHistogram().DataPoints().At(k).Attributes().AsRaw())
 						_ = msl.At(g).ExponentialHistogram().DataPoints().At(k).Attributes().FromRaw(rawOrdered)
 					}
 				case pmetric.MetricTypeSummary:
-					for k := 0; k < msl.At(g).Summary().DataPoints().Len(); k++ {
+					for k := range msl.At(g).Summary().DataPoints().Len() {
 						rawOrdered := internal.OrderMapByKey(msl.At(g).Summary().DataPoints().At(k).Attributes().AsRaw())
 						_ = msl.At(g).Summary().DataPoints().At(k).Attributes().FromRaw(rawOrdered)
 					}
@@ -306,9 +306,9 @@ func orderDatapointAttributes(metrics pmetric.Metrics) {
 func maskMetricAttributeValue(metrics pmetric.Metrics, attributeName string, metricNames []string) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		ilms := rms.At(i).ScopeMetrics()
-		for j := 0; j < ilms.Len(); j++ {
+		for j := range ilms.Len() {
 			maskMetricSliceAttributeValues(ilms.At(j).Metrics(), attributeName, metricNames)
 		}
 	}
@@ -324,7 +324,7 @@ func maskMetricSliceAttributeValues(metrics pmetric.MetricSlice, attributeName s
 		metricNameSet[metricName] = true
 	}
 
-	for i := 0; i < metrics.Len(); i++ {
+	for i := range metrics.Len() {
 		if len(metricNames) == 0 || metricNameSet[metrics.At(i).Name()] {
 			switch metrics.At(i).Type() {
 			case pmetric.MetricTypeHistogram:
@@ -379,7 +379,7 @@ func maskMetricSliceAttributeValues(metrics pmetric.MetricSlice, attributeName s
 // maskDataPointSliceAttributeValues sets the value of the specified attribute to
 // the zero value associated with the attribute data type.
 func maskDataPointSliceAttributeValues(dataPoints pmetric.NumberDataPointSlice, attributeName string) {
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		attributes := dataPoints.At(i).Attributes()
 		attribute, ok := attributes.Get(attributeName)
 		if ok {
@@ -402,7 +402,7 @@ func maskDataPointSliceAttributeValues(dataPoints pmetric.NumberDataPointSlice,
 // maskHistogramSliceAttributeValues sets the value of the specified attribute to
 // the zero value associated with the attribute data type.
 func maskHistogramSliceAttributeValues(dataPoints pmetric.HistogramDataPointSlice, attributeName string) {
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		attributes := dataPoints.At(i).Attributes()
 		attribute, ok := attributes.Get(attributeName)
 		if ok {
@@ -433,9 +433,9 @@ func MatchMetricAttributeValue(attributeName string, pattern string, metricNames
 func matchMetricAttributeValue(metrics pmetric.Metrics, attributeName string, re *regexp.Regexp, metricNames []string) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		ilms := rms.At(i).ScopeMetrics()
-		for j := 0; j < ilms.Len(); j++ {
+		for j := range ilms.Len() {
 			matchMetricSliceAttributeValues(ilms.At(j).Metrics(), attributeName, re, metricNames)
 		}
 	}
@@ -447,7 +447,7 @@ func matchMetricSliceAttributeValues(metrics pmetric.MetricSlice, attributeName
 		metricNameSet[metricName] = true
 	}
 
-	for i := 0; i < metrics.Len(); i++ {
+	for i := range metrics.Len() {
 		if len(metricNames) == 0 || metricNameSet[metrics.At(i).Name()] {
 			switch metrics.At(i).Type() {
 			case pmetric.MetricTypeHistogram:
@@ -500,7 +500,7 @@ func matchMetricSliceAttributeValues(metrics pmetric.MetricSlice, attributeName
 }
 
 func matchDataPointSliceAttributeValues(dataPoints pmetric.NumberDataPointSlice, attributeName string, re *regexp.Regexp) {
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		attributes := dataPoints.At(i).Attributes()
 		attribute, ok := attributes.Get(attributeName)
 		if ok {
@@ -513,7 +513,7 @@ func matchDataPointSliceAttributeValues(dataPoints pmetric.NumberDataPointSlice,
 }
 
 func matchHistogramDataPointSliceAttributeValues(dataPoints pmetric.HistogramDataPointSlice, attributeName string, re *regexp.Regexp) {
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		attributes := dataPoints.At(i).Attributes()
 		attribute, ok := attributes.Get(attributeName)
 		if ok {
@@ -536,7 +536,7 @@ func MatchResourceAttributeValue(attributeName string, pattern string) CompareMe
 func matchResourceAttributeValue(metrics pmetric.Metrics, attributeName string, re *regexp.Regexp) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		internal.MatchResourceAttributeValue(rms.At(i).Resource(), attributeName, re)
 	}
 }
@@ -552,7 +552,7 @@ func IgnoreResourceAttributeValue(attributeName string) CompareMetricsOption {
 func maskMetricsResourceAttributeValue(metrics pmetric.Metrics, attributeName string) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		internal.MaskResourceAttributeValue(rms.At(i).Resource(), attributeName)
 	}
 }
@@ -566,7 +566,7 @@ func ChangeResourceAttributeValue(attributeName string, changeFn func(string) st
 func changeMetricsResourceAttributeValue(metrics pmetric.Metrics, attributeName string, changeFn func(string) string) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		internal.ChangeResourceAttributeValue(rms.At(i).Resource(), attributeName, changeFn)
 	}
 }
@@ -586,11 +586,11 @@ func maskSubsequentDataPoints(metrics pmetric.Metrics, metricNames []string) {
 	}
 
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		sms := rms.At(i).ScopeMetrics()
-		for j := 0; j < sms.Len(); j++ {
+		for j := range sms.Len() {
 			ms := sms.At(j).Metrics()
-			for k := 0; k < ms.Len(); k++ {
+			for k := range ms.Len() {
 				if len(metricNames) == 0 || metricNameSet[ms.At(k).Name()] {
 					switch ms.At(k).Type() {
 					case pmetric.MetricTypeHistogram:
@@ -642,9 +642,9 @@ func IgnoreScopeVersion() CompareMetricsOption {
 func maskScopeVersion(metrics pmetric.Metrics) {
 	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		rm := rms.At(i)
-		for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+		for j := range rm.ScopeMetrics().Len() {
 			sm := rm.ScopeMetrics().At(j)
 			sm.Scope().SetVersion("")
 		}
@@ -660,7 +660,7 @@ func IgnoreScopeMetricsOrder() CompareMetricsOption {
 }
 
 func sortScopeMetricsSlices(ms pmetric.Metrics) {
-	for i := 0; i < ms.ResourceMetrics().Len(); i++ {
+	for i := range ms.ResourceMetrics().Len() {
 		ms.ResourceMetrics().At(i).ScopeMetrics().Sort(func(a, b pmetric.ScopeMetrics) bool {
 			if a.SchemaUrl() != b.SchemaUrl() {
 				return a.SchemaUrl() < b.SchemaUrl()
@@ -682,8 +682,8 @@ func IgnoreMetricsOrder() CompareMetricsOption {
 }
 
 func sortMetricSlices(ms pmetric.Metrics) {
-	for i := 0; i < ms.ResourceMetrics().Len(); i++ {
-		for j := 0; j < ms.ResourceMetrics().At(i).ScopeMetrics().Len(); j++ {
+	for i := range ms.ResourceMetrics().Len() {
+		for j := range ms.ResourceMetrics().At(i).ScopeMetrics().Len() {
 			ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Sort(func(a, b pmetric.Metric) bool {
 				return a.Name() < b.Name()
 			})
@@ -700,9 +700,9 @@ func IgnoreMetricDataPointsOrder() CompareMetricsOption {
 }
 
 func sortMetricDataPointSlices(ms pmetric.Metrics) {
-	for i := 0; i < ms.ResourceMetrics().Len(); i++ {
-		for j := 0; j < ms.ResourceMetrics().At(i).ScopeMetrics().Len(); j++ {
-			for k := 0; k < ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Len(); k++ {
+	for i := range ms.ResourceMetrics().Len() {
+		for j := range ms.ResourceMetrics().At(i).ScopeMetrics().Len() {
+			for k := range ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Len() {
 				m := ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().At(k)
 				//exhaustive:enforce
 				switch m.Type() {
@@ -764,12 +764,12 @@ func IgnoreSummaryDataPointValueAtQuantileSliceOrder() CompareMetricsOption {
 }
 
 func sortSummaryDataPointValueAtQuantileSlices(ms pmetric.Metrics) {
-	for i := 0; i < ms.ResourceMetrics().Len(); i++ {
-		for j := 0; j < ms.ResourceMetrics().At(i).ScopeMetrics().Len(); j++ {
-			for k := 0; k < ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Len(); k++ {
+	for i := range ms.ResourceMetrics().Len() {
+		for j := range ms.ResourceMetrics().At(i).ScopeMetrics().Len() {
+			for k := range ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Len() {
 				m := ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().At(k)
 				if m.Type() == pmetric.MetricTypeSummary {
-					for l := 0; l < m.Summary().DataPoints().Len(); l++ {
+					for l := range m.Summary().DataPoints().Len() {
 						m.Summary().DataPoints().At(l).QuantileValues().Sort(func(a, b pmetric.SummaryDataPointValueAtQuantile) bool {
 							return a.Quantile() < b.Quantile()
 						})
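The masking helpers above all walk the same pdata hierarchy (resource -> scope -> metric) with nested range-over-Len loops. A compact, runnable sketch mirroring maskScopeVersion from the hunk above, assuming the pdata module is on the module path (illustration only, not part of the patch):

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/pdata/pmetric"
    )

    // walk resource -> scope with range-over-Len and blank the scope version in place
    func maskScopeVersion(metrics pmetric.Metrics) {
    	rms := metrics.ResourceMetrics()
    	for i := range rms.Len() {
    		rm := rms.At(i)
    		for j := range rm.ScopeMetrics().Len() {
    			rm.ScopeMetrics().At(j).Scope().SetVersion("")
    		}
    	}
    }

    func main() {
    	m := pmetric.NewMetrics()
    	sm := m.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
    	sm.Scope().SetVersion("1.2.3")
    	maskScopeVersion(m)
    	fmt.Printf("%q\n", sm.Scope().Version()) // ""
    }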
diff --git a/pkg/pdatatest/pprofiletest/options.go b/pkg/pdatatest/pprofiletest/options.go
index 056fac9a3be0b..db10eff712020 100644
--- a/pkg/pdatatest/pprofiletest/options.go
+++ b/pkg/pdatatest/pprofiletest/options.go
@@ -44,7 +44,7 @@ func (opt ignoreResourceAttributeValue) applyOnProfiles(expected, actual pprofil
 func (opt ignoreResourceAttributeValue) maskProfilesResourceAttributeValue(profiles pprofile.Profiles) {
 	rls := profiles.ResourceProfiles()
-	for i := 0; i < rls.Len(); i++ {
+	for i := range rls.Len() {
 		internal.MaskResourceAttributeValue(rls.At(i).Resource(), opt.attributeName)
 	}
 }
@@ -68,9 +68,9 @@ func (opt ignoreScopeAttributeValue) applyOnProfiles(expected, actual pprofile.P
 func (opt ignoreScopeAttributeValue) maskProfilesScopeAttributeValue(profiles pprofile.Profiles) {
 	rls := profiles.ResourceProfiles()
-	for i := 0; i < profiles.ResourceProfiles().Len(); i++ {
+	for i := range profiles.ResourceProfiles().Len() {
 		sls := rls.At(i).ScopeProfiles()
-		for j := 0; j < sls.Len(); j++ {
+		for j := range sls.Len() {
 			lr := sls.At(j)
 			val, exists := lr.Scope().Attributes().Get(opt.attributeName)
 			if exists {
@@ -99,13 +99,13 @@ func (opt ignoreProfileAttributeValue) applyOnProfiles(expected, actual pprofile
 func (opt ignoreProfileAttributeValue) maskProfileAttributeValue(profiles pprofile.Profiles) {
 	rls := profiles.ResourceProfiles()
-	for i := 0; i < profiles.ResourceProfiles().Len(); i++ {
+	for i := range profiles.ResourceProfiles().Len() {
 		sls := rls.At(i).ScopeProfiles()
-		for j := 0; j < sls.Len(); j++ {
+		for j := range sls.Len() {
 			lrs := sls.At(j).Profiles()
-			for k := 0; k < lrs.Len(); k++ {
+			for k := range lrs.Len() {
 				lr := lrs.At(k)
-				for l := 0; l < lr.AttributeTable().Len(); l++ {
+				for l := range lr.AttributeTable().Len() {
 					a := lr.AttributeTable().At(l)
 					if a.Key() == opt.attributeName {
 						a.Value().SetEmptyBytes()
@@ -131,11 +131,11 @@ func (opt ignoreProfileTimestampValues) applyOnProfiles(expected, actual pprofil
 func (opt ignoreProfileTimestampValues) maskProfileTimestampValues(profiles pprofile.Profiles) {
 	rls := profiles.ResourceProfiles()
-	for i := 0; i < profiles.ResourceProfiles().Len(); i++ {
+	for i := range profiles.ResourceProfiles().Len() {
 		sls := rls.At(i).ScopeProfiles()
-		for j := 0; j < sls.Len(); j++ {
+		for j := range sls.Len() {
 			lrs := sls.At(j).Profiles()
-			for k := 0; k < lrs.Len(); k++ {
+			for k := range lrs.Len() {
 				lr := lrs.At(k)
 				lr.SetStartTime(pcommon.NewTimestampFromTime(time.Time{}))
 				lr.SetDuration(pcommon.NewTimestampFromTime(time.Time{}))
@@ -172,7 +172,7 @@ func IgnoreScopeProfilesOrder() CompareProfilesOption {
 }
 
 func sortScopeProfilesSlices(ls pprofile.Profiles) {
-	for i := 0; i < ls.ResourceProfiles().Len(); i++ {
+	for i := range ls.ResourceProfiles().Len() {
 		ls.ResourceProfiles().At(i).ScopeProfiles().Sort(func(a, b pprofile.ScopeProfiles) bool {
 			if a.SchemaUrl() != b.SchemaUrl() {
 				return a.SchemaUrl() < b.SchemaUrl()
@@ -194,8 +194,8 @@ func IgnoreProfilesOrder() CompareProfilesOption {
 }
 
 func sortProfileSlices(ls pprofile.Profiles) {
-	for i := 0; i < ls.ResourceProfiles().Len(); i++ {
-		for j := 0; j < ls.ResourceProfiles().At(i).ScopeProfiles().Len(); j++ {
+	for i := range ls.ResourceProfiles().Len() {
+		for j := range ls.ResourceProfiles().At(i).ScopeProfiles().Len() {
 			ls.ResourceProfiles().At(i).ScopeProfiles().At(j).Profiles().Sort(func(a, b pprofile.Profile) bool {
 				if a.StartTime() != b.StartTime() {
 					return a.StartTime() < b.StartTime()
diff --git a/pkg/pdatatest/pprofiletest/profiles.go b/pkg/pdatatest/pprofiletest/profiles.go
index 15dcbc61d8b5a..fba711bc2c592 100644
--- a/pkg/pdatatest/pprofiletest/profiles.go
+++ b/pkg/pdatatest/pprofiletest/profiles.go
@@ -46,10 +46,10 @@ func CompareProfiles(expected, actual pprofile.Profiles, options ...CompareProfi
 	var errs error
 	var outOfOrderErrs error
-	for e := 0; e < numResources; e++ {
+	for e := range numResources {
 		er := expectedProfiles.At(e)
 		var foundMatch bool
-		for a := 0; a < numResources; a++ {
+		for a := range numResources {
 			ar := actualProfiles.At(a)
 			if _, ok := matchingResources[ar]; ok {
 				continue
@@ -70,7 +70,7 @@ func CompareProfiles(expected, actual pprofile.Profiles, options ...CompareProfi
 		}
 	}
 
-	for i := 0; i < numResources; i++ {
+	for i := range numResources {
 		if _, ok := matchingResources[actualProfiles.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected resource: %v", actualProfiles.At(i).Resource().Attributes().AsRaw()))
 		}
@@ -114,10 +114,10 @@ func CompareResourceProfiles(expected, actual pprofile.ResourceProfiles) error {
 	matchingScopeProfiles := make(map[pprofile.ScopeProfiles]pprofile.ScopeProfiles, numScopeProfiles)
 	var outOfOrderErrs error
-	for e := 0; e < numScopeProfiles; e++ {
+	for e := range numScopeProfiles {
 		esl := expected.ScopeProfiles().At(e)
 		var foundMatch bool
-		for a := 0; a < numScopeProfiles; a++ {
+		for a := range numScopeProfiles {
 			asl := actual.ScopeProfiles().At(a)
 			if _, ok := matchingScopeProfiles[asl]; ok {
 				continue
@@ -138,7 +138,7 @@ func CompareResourceProfiles(expected, actual pprofile.ResourceProfiles) error {
 		}
 	}
 
-	for i := 0; i < numScopeProfiles; i++ {
+	for i := range numScopeProfiles {
 		if _, ok := matchingScopeProfiles[actual.ScopeProfiles().At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected scope: %s", actual.ScopeProfiles().At(i).Scope().Name()))
 		}
@@ -151,7 +151,7 @@ func CompareResourceProfiles(expected, actual pprofile.ResourceProfiles) error {
 		return outOfOrderErrs
 	}
 
-	for i := 0; i < esls.Len(); i++ {
+	for i := range esls.Len() {
 		errPrefix := fmt.Sprintf(`scope "%s"`, esls.At(i).Scope().Name())
 		errs = multierr.Append(errs, internal.AddErrPrefix(errPrefix, CompareScopeProfiles(esls.At(i), asls.At(i))))
 	}
@@ -179,12 +179,12 @@ func CompareScopeProfiles(expected, actual pprofile.ScopeProfiles) error {
 	matchingProfiles := make(map[pprofile.Profile]pprofile.Profile, numProfiles)
 	var outOfOrderErrs error
-	for e := 0; e < numProfiles; e++ {
+	for e := range numProfiles {
 		elr := expected.Profiles().At(e)
 		em := profileAttributesToMap(elr)
 		var foundMatch bool
-		for a := 0; a < numProfiles; a++ {
+		for a := range numProfiles {
 			alr := actual.Profiles().At(a)
 			if _, ok := matchingProfiles[alr]; ok {
 				continue
@@ -207,7 +207,7 @@ func CompareScopeProfiles(expected, actual pprofile.ScopeProfiles) error {
 		}
 	}
 
-	for i := 0; i < numProfiles; i++ {
+	for i := range numProfiles {
 		if _, ok := matchingProfiles[actual.Profiles().At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf("unexpected profile: %v", profileAttributesToMap(actual.Profiles().At(i))))
@@ -331,10 +331,10 @@ func CompareProfileValueTypeSlice(expected, actual pprofile.ValueTypeSlice) erro
 	matchingValueTypes := make(map[pprofile.ValueType]pprofile.ValueType, numValueTypes)
 	var outOfOrderErrs error
-	for e := 0; e < numValueTypes; e++ {
+	for e := range numValueTypes {
 		elr := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numValueTypes; a++ {
+		for a := range numValueTypes {
 			alr := actual.At(a)
 			if _, ok := matchingValueTypes[alr]; ok {
 				continue
@@ -355,7 +355,7 @@ func CompareProfileValueTypeSlice(expected, actual pprofile.ValueTypeSlice) erro
 		}
 	}
 
-	for i := 0; i < numValueTypes; i++ {
+	for i := range numValueTypes {
 		if _, ok := matchingValueTypes[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf(`unexpected valueType "unit: %d, type: %d, aggregationTemporality: %d"`, actual.At(i).UnitStrindex(), actual.At(i).TypeStrindex(), actual.At(i).AggregationTemporality()))
@@ -399,10 +399,10 @@ func CompareProfileSampleSlice(expected, actual pprofile.SampleSlice) error {
 	matchingItems := make(map[pprofile.Sample]pprofile.Sample, numSlice)
 	var outOfOrderErrs error
-	for e := 0; e < numSlice; e++ {
+	for e := range numSlice {
 		elr := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numSlice; a++ {
+		for a := range numSlice {
 			alr := actual.At(a)
 			if _, ok := matchingItems[alr]; ok {
 				continue
@@ -423,7 +423,7 @@ func CompareProfileSampleSlice(expected, actual pprofile.SampleSlice) error {
 		}
 	}
 
-	for i := 0; i < numSlice; i++ {
+	for i := range numSlice {
 		if _, ok := matchingItems[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf(`unexpected sample "attributes: %v"`, actual.At(i).AttributeIndices().AsRaw()))
@@ -487,10 +487,10 @@ func CompareProfileMappingSlice(expected, actual pprofile.MappingSlice) error {
 	matchingItems := make(map[pprofile.Mapping]pprofile.Mapping, numItems)
 	var outOfOrderErrs error
-	for e := 0; e < numItems; e++ {
+	for e := range numItems {
 		elr := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numItems; a++ {
+		for a := range numItems {
 			alr := actual.At(a)
 			if _, ok := matchingItems[alr]; ok {
 				continue
@@ -511,7 +511,7 @@ func CompareProfileMappingSlice(expected, actual pprofile.MappingSlice) error {
 		}
 	}
 
-	for i := 0; i < numItems; i++ {
+	for i := range numItems {
 		if _, ok := matchingItems[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf(`unexpected profile mapping "attributes: %v"`, actual.At(i).AttributeIndices().AsRaw()))
@@ -560,10 +560,10 @@ func CompareProfileFunctionSlice(expected, actual pprofile.FunctionSlice) error
 	matchingItems := make(map[pprofile.Function]pprofile.Function, numItems)
 	var outOfOrderErrs error
-	for e := 0; e < numItems; e++ {
+	for e := range numItems {
 		elr := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numItems; a++ {
+		for a := range numItems {
 			alr := actual.At(a)
 			if _, ok := matchingItems[alr]; ok {
 				continue
@@ -584,7 +584,7 @@ func CompareProfileFunctionSlice(expected, actual pprofile.FunctionSlice) error
 		}
 	}
 
-	for i := 0; i < numItems; i++ {
+	for i := range numItems {
 		if _, ok := matchingItems[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf(`unexpected profile function "name: %d"`, actual.At(i).NameStrindex()))
@@ -627,10 +627,10 @@ func CompareProfileLocationSlice(expected, actual pprofile.LocationSlice) error
 	matchingItems := make(map[pprofile.Location]pprofile.Location, numItems)
 	var outOfOrderErrs error
-	for e := 0; e < numItems; e++ {
+	for e := range numItems {
 		elr := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numItems; a++ {
+		for a := range numItems {
 			alr := actual.At(a)
 			if _, ok := matchingItems[alr]; ok {
 				continue
@@ -651,7 +651,7 @@ func CompareProfileLocationSlice(expected, actual pprofile.LocationSlice) error
 		}
 	}
 
-	for i := 0; i < numItems; i++ {
+	for i := range numItems {
 		if _, ok := matchingItems[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf(`unexpected location "attributes: %v"`, actual.At(i).AttributeIndices().AsRaw()))
@@ -711,10 +711,10 @@ func CompareProfileLineSlice(expected, actual pprofile.LineSlice) error {
 	matchingItems := make(map[pprofile.Line]pprofile.Line, numItems)
 	var outOfOrderErrs error
-	for e := 0; e < numItems; e++ {
+	for e := range numItems {
 		elr := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numItems; a++ {
+		for a := range numItems {
 			alr := actual.At(a)
 			if _, ok := matchingItems[alr]; ok {
 				continue
@@ -735,7 +735,7 @@ func CompareProfileLineSlice(expected, actual pprofile.LineSlice) error {
 		}
 	}
 
-	for i := 0; i < numItems; i++ {
+	for i := range numItems {
 		if _, ok := matchingItems[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf(`unexpected profile line "functionIndex: %d"`, actual.At(i).FunctionIndex()))
@@ -777,10 +777,10 @@ func CompareProfileAttributeUnitSlice(expected, actual pprofile.AttributeUnitSli
 	matchingItems := make(map[pprofile.AttributeUnit]pprofile.AttributeUnit, numItems)
 	var outOfOrderErrs error
-	for e := 0; e < numItems; e++ {
+	for e := range numItems {
 		elr := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numItems; a++ {
+		for a := range numItems {
 			alr := actual.At(a)
 			if _, ok := matchingItems[alr]; ok {
 				continue
@@ -801,7 +801,7 @@ func CompareProfileAttributeUnitSlice(expected, actual pprofile.AttributeUnitSli
 		}
 	}
 
-	for i := 0; i < numItems; i++ {
+	for i := range numItems {
 		if _, ok := matchingItems[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf(`unexpected profile attributeUnit "attributeKey: %d"`, actual.At(i).AttributeKeyStrindex()))
@@ -831,10 +831,10 @@ func CompareProfileLinkSlice(expected, actual pprofile.LinkSlice) error {
 	matchingItems := make(map[pprofile.Link]pprofile.Link, numItems)
 	var outOfOrderErrs error
-	for e := 0; e < numItems; e++ {
+	for e := range numItems {
 		elr := expected.At(e)
 		var foundMatch bool
-		for a := 0; a < numItems; a++ {
+		for a := range numItems {
 			alr := actual.At(a)
 			if _, ok := matchingItems[alr]; ok {
 				continue
@@ -855,7 +855,7 @@ func CompareProfileLinkSlice(expected, actual pprofile.LinkSlice) error {
 		}
 	}
 
-	for i := 0; i < numItems; i++ {
+	for i := range numItems {
 		if _, ok := matchingItems[actual.At(i)]; !ok {
 			errs = multierr.Append(errs, fmt.Errorf(`unexpected profile link "spanId: %s, traceId: %s"`, actual.At(i).SpanID().String(), actual.At(i).TraceID().String()))
diff --git a/pkg/pdatatest/ptracetest/options.go b/pkg/pdatatest/ptracetest/options.go
index ff4bdb1d1feb2..47c960268632d 100644
--- a/pkg/pdatatest/ptracetest/options.go
+++ b/pkg/pdatatest/ptracetest/options.go
@@ -36,7 +36,7 @@ func IgnoreResourceAttributeValue(attributeName string) CompareTracesOption {
 func maskTracesResourceAttributeValue(traces ptrace.Traces, attributeName string) {
 	rss := traces.ResourceSpans()
-	for i := 0; i < rss.Len(); i++ {
+	for i := range rss.Len() {
 		internal.MaskResourceAttributeValue(rss.At(i).Resource(), attributeName)
 	}
 }
@@ -69,7 +69,7 @@ func IgnoreScopeSpansOrder() CompareTracesOption {
 }
 
 func sortScopeSpansSlices(ts ptrace.Traces) {
-	for i := 0; i < ts.ResourceSpans().Len(); i++ {
+	for i := range ts.ResourceSpans().Len() {
 		ts.ResourceSpans().At(i).ScopeSpans().Sort(func(a, b ptrace.ScopeSpans) bool {
 			if a.SchemaUrl() != b.SchemaUrl() {
 				return a.SchemaUrl() < b.SchemaUrl()
@@ -91,8 +91,8 @@ func IgnoreSpansOrder() CompareTracesOption {
 }
 
 func sortSpanSlices(ts ptrace.Traces) {
-	for i := 0; i < ts.ResourceSpans().Len(); i++ {
-		for j := 0; j < ts.ResourceSpans().At(i).ScopeSpans().Len(); j++ {
+	for i := range ts.ResourceSpans().Len() {
+		for j := range ts.ResourceSpans().At(i).ScopeSpans().Len() {
 			ts.ResourceSpans().At(i).ScopeSpans().At(j).Spans().Sort(func(a, b ptrace.Span) bool {
 				if a.Kind() != b.Kind() {
 					return a.Kind() < b.Kind()
@@ -139,11 +139,11
@@ func IgnoreSpanID() CompareTracesOption { } func maskSpanID(traces ptrace.Traces, spanID pcommon.SpanID) { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) - for k := 0; k < ss.Spans().Len(); k++ { + for k := range ss.Spans().Len() { span := ss.Spans().At(k) span.SetSpanID(spanID) } @@ -160,11 +160,11 @@ func IgnoreSpanAttributeValue(attributeName string) CompareTracesOption { } func maskSpanAttributeValue(traces ptrace.Traces, attributeName string) { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) - for k := 0; k < ss.Spans().Len(); k++ { + for k := range ss.Spans().Len() { span := ss.Spans().At(k) if _, ok := span.Attributes().Get(attributeName); ok { span.Attributes().PutStr(attributeName, "*") @@ -183,9 +183,9 @@ func IgnoreScopeSpanInstrumentationScopeName() CompareTracesOption { } func maskScopeSpanInstrumentationScopeName(traces ptrace.Traces) { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) ss.Scope().SetName("") } @@ -201,9 +201,9 @@ func IgnoreScopeSpanInstrumentationScopeVersion() CompareTracesOption { } func maskScopeSpanInstrumentationScopeVersion(traces ptrace.Traces) { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) ss.Scope().SetVersion("") } @@ -219,9 +219,9 @@ func IgnoreScopeSpanInstrumentationScopeAttributeValue(attributeName string) Com } func maskScopeSpanInstrumentationScopeAttributeValue(traces ptrace.Traces, attributeName string) { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) if _, ok := ss.Scope().Attributes().Get(attributeName); ok { ss.Scope().Attributes().PutStr(attributeName, "*") @@ -240,11 +240,11 @@ func IgnoreStartTimestamp() CompareTracesOption { } func maskStartTimestamp(traces ptrace.Traces, ts pcommon.Timestamp) { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) - for k := 0; k < ss.Spans().Len(); k++ { + for k := range ss.Spans().Len() { span := ss.Spans().At(k) span.SetStartTimestamp(ts) } @@ -262,11 +262,11 @@ func IgnoreEndTimestamp() CompareTracesOption { } func maskEndTimestamp(traces ptrace.Traces, ts pcommon.Timestamp) { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) - for k := 0; k < ss.Spans().Len(); k++ { + for k := range 
ss.Spans().Len() { span := ss.Spans().At(k) span.SetEndTimestamp(ts) } @@ -284,11 +284,11 @@ func IgnoreTraceID() CompareTracesOption { } func maskTraceID(traces ptrace.Traces, traceID pcommon.TraceID) { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) - for k := 0; k < ss.Spans().Len(); k++ { + for k := range ss.Spans().Len() { span := ss.Spans().At(k) span.SetTraceID(traceID) } diff --git a/pkg/pdatatest/ptracetest/traces.go b/pkg/pdatatest/ptracetest/traces.go index 087f9e5a3029b..a48dd49e35a0f 100644 --- a/pkg/pdatatest/ptracetest/traces.go +++ b/pkg/pdatatest/ptracetest/traces.go @@ -38,10 +38,10 @@ func CompareTraces(expected, actual ptrace.Traces, options ...CompareTracesOptio var errs error var outOfOrderErrs error - for e := 0; e < numResources; e++ { + for e := range numResources { er := expectedSpans.At(e) var foundMatch bool - for a := 0; a < numResources; a++ { + for a := range numResources { ar := actualSpans.At(a) if _, ok := matchingResources[ar]; ok { continue @@ -62,7 +62,7 @@ func CompareTraces(expected, actual ptrace.Traces, options ...CompareTracesOptio } } - for i := 0; i < numResources; i++ { + for i := range numResources { if _, ok := matchingResources[actualSpans.At(i)]; !ok { errs = multierr.Append(errs, fmt.Errorf("unexpected resource: %v", actualSpans.At(i).Resource().Attributes().AsRaw())) } @@ -103,10 +103,10 @@ func CompareResourceSpans(expected, actual ptrace.ResourceSpans) error { matchingScopeSpans := make(map[ptrace.ScopeSpans]ptrace.ScopeSpans, numScopeSpans) var outOfOrderErrs error - for e := 0; e < numScopeSpans; e++ { + for e := range numScopeSpans { es := expected.ScopeSpans().At(e) var foundMatch bool - for a := 0; a < numScopeSpans; a++ { + for a := range numScopeSpans { as := actual.ScopeSpans().At(a) if _, ok := matchingScopeSpans[as]; ok { continue @@ -127,7 +127,7 @@ func CompareResourceSpans(expected, actual ptrace.ResourceSpans) error { } } - for i := 0; i < numScopeSpans; i++ { + for i := range numScopeSpans { if _, ok := matchingScopeSpans[actual.ScopeSpans().At(i)]; !ok { errs = multierr.Append(errs, fmt.Errorf("unexpected scope: %s", actual.ScopeSpans().At(i).Scope().Name())) } @@ -140,7 +140,7 @@ func CompareResourceSpans(expected, actual ptrace.ResourceSpans) error { return outOfOrderErrs } - for i := 0; i < expected.ScopeSpans().Len(); i++ { + for i := range expected.ScopeSpans().Len() { errPrefix := fmt.Sprintf(`scope "%s"`, expected.ScopeSpans().At(i).Scope().Name()) errs = multierr.Append(errs, internal.AddErrPrefix(errPrefix, CompareScopeSpans(expected.ScopeSpans().At(i), actual.ScopeSpans().At(i)))) @@ -168,10 +168,10 @@ func CompareScopeSpans(expected, actual ptrace.ScopeSpans) error { matchingSpans := make(map[ptrace.Span]ptrace.Span, numSpans) var outOfOrderErrs error - for e := 0; e < numSpans; e++ { + for e := range numSpans { es := expected.Spans().At(e) var foundMatch bool - for a := 0; a < numSpans; a++ { + for a := range numSpans { as := actual.Spans().At(a) if _, ok := matchingSpans[as]; ok { continue @@ -192,7 +192,7 @@ func CompareScopeSpans(expected, actual ptrace.ScopeSpans) error { } } - for i := 0; i < numSpans; i++ { + for i := range numSpans { if _, ok := matchingSpans[actual.Spans().At(i)]; !ok { errs = multierr.Append(errs, fmt.Errorf("unexpected span: %s", actual.Spans().At(i).Name())) } @@ -302,10 +302,10 @@ 
func compareSpanEventSlice(expected, actual ptrace.SpanEventSlice) (errs error) matchingSpanEvents := make(map[ptrace.SpanEvent]ptrace.SpanEvent, numSpanEvents) var outOfOrderErrs error - for e := 0; e < numSpanEvents; e++ { + for e := range numSpanEvents { ee := expected.At(e) var foundMatch bool - for a := 0; a < numSpanEvents; a++ { + for a := range numSpanEvents { ae := actual.At(a) if _, ok := matchingSpanEvents[ae]; ok { continue @@ -326,7 +326,7 @@ func compareSpanEventSlice(expected, actual ptrace.SpanEventSlice) (errs error) } } - for i := 0; i < numSpanEvents; i++ { + for i := range numSpanEvents { if _, ok := matchingSpanEvents[actual.At(i)]; !ok { errs = multierr.Append(errs, fmt.Errorf("unexpected span event: %s", actual.At(i).Name())) } @@ -382,10 +382,10 @@ func compareSpanLinkSlice(expected, actual ptrace.SpanLinkSlice) (errs error) { matchingSpanLinks := make(map[ptrace.SpanLink]ptrace.SpanLink, numSpanLinks) var outOfOrderErrs error - for e := 0; e < numSpanLinks; e++ { + for e := range numSpanLinks { el := expected.At(e) var foundMatch bool - for a := 0; a < numSpanLinks; a++ { + for a := range numSpanLinks { al := actual.At(a) if _, ok := matchingSpanLinks[al]; ok { continue @@ -406,7 +406,7 @@ func compareSpanLinkSlice(expected, actual ptrace.SpanLinkSlice) (errs error) { } } - for i := 0; i < numSpanLinks; i++ { + for i := range numSpanLinks { if _, ok := matchingSpanLinks[actual.At(i)]; !ok { errs = multierr.Append(errs, fmt.Errorf("unexpected span link: %s", actual.At(i).SpanID())) } diff --git a/pkg/pdatautil/hash.go b/pkg/pdatautil/hash.go index 172789c607bf1..550b19f920bd4 100644 --- a/pkg/pdatautil/hash.go +++ b/pkg/pdatautil/hash.go @@ -173,7 +173,7 @@ func (hw *hashWriter) writeValueHash(v pcommon.Value) { case pcommon.ValueTypeSlice: sl := v.Slice() hw.byteBuf = append(hw.byteBuf, valSlicePrefix...) - for i := 0; i < sl.Len(); i++ { + for i := range sl.Len() { hw.writeValueHash(sl.At(i)) } hw.byteBuf = append(hw.byteBuf, valSliceSuffix...) 
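// Illustrative sketch (hypothetical, not from this patch): every hunk above is
// the same mechanical rewrite. Since Go 1.22, ranging over an int n yields
// i = 0, 1, ..., n-1, so `for i := range n` replaces the classic three-clause
// loop that the intrange linter flags. A minimal runnable example of the
// equivalence; all names below exist only for this sketch:
package main

import "fmt"

func main() {
	n := 3
	for i := 0; i < n; i++ { // classic form flagged by intrange
		fmt.Println("classic", i)
	}
	for i := range n { // equivalent form on Go 1.22+
		fmt.Println("range", i)
	}
}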
diff --git a/pkg/pdatautil/hash_test.go b/pkg/pdatautil/hash_test.go index a725676f37ac1..279d4ef3f5103 100644 --- a/pkg/pdatautil/hash_test.go +++ b/pkg/pdatautil/hash_test.go @@ -135,7 +135,7 @@ func TestMapHash(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - for i := 0; i < len(tt.maps); i++ { + for i := range len(tt.maps) { for j := i + 1; j < len(tt.maps); j++ { if tt.equal { assert.Equal(t, MapHash(tt.maps[i]), MapHash(tt.maps[j]), @@ -285,7 +285,7 @@ func TestValueHash(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - for i := 0; i < len(tt.values); i++ { + for i := range len(tt.values) { for j := i + 1; j < len(tt.values); j++ { if tt.equal { assert.Equal(t, ValueHash(tt.values[i]), ValueHash(tt.values[j]), @@ -342,7 +342,7 @@ func BenchmarkMapHashFourItems(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { MapHash(m) } } @@ -360,7 +360,7 @@ func BenchmarkMapHashEightItems(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { MapHash(m) } } @@ -386,7 +386,7 @@ func BenchmarkMapHashWithEmbeddedSliceAndMap(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { MapHash(m) } } diff --git a/pkg/resourcetotelemetry/resource_to_telemetry.go b/pkg/resourcetotelemetry/resource_to_telemetry.go index 571f89b0c47ea..2d66c393eade2 100644 --- a/pkg/resourcetotelemetry/resource_to_telemetry.go +++ b/pkg/resourcetotelemetry/resource_to_telemetry.go @@ -48,14 +48,14 @@ func WrapMetricsExporter(set Settings, exporter exporter.Metrics) exporter.Metri func convertToMetricsAttributes(md pmetric.Metrics) pmetric.Metrics { rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { resource := rms.At(i).Resource() ilms := rms.At(i).ScopeMetrics() - for j := 0; j < ilms.Len(); j++ { + for j := range ilms.Len() { ilm := ilms.At(j) metricSlice := ilm.Metrics() - for k := 0; k < metricSlice.Len(); k++ { + for k := range metricSlice.Len() { addAttributesToMetric(metricSlice.At(k), resource.Attributes()) } } @@ -81,25 +81,25 @@ func addAttributesToMetric(metric pmetric.Metric, labelMap pcommon.Map) { } func addAttributesToNumberDataPoints(ps pmetric.NumberDataPointSlice, newAttributeMap pcommon.Map) { - for i := 0; i < ps.Len(); i++ { + for i := range ps.Len() { joinAttributeMaps(newAttributeMap, ps.At(i).Attributes()) } } func addAttributesToHistogramDataPoints(ps pmetric.HistogramDataPointSlice, newAttributeMap pcommon.Map) { - for i := 0; i < ps.Len(); i++ { + for i := range ps.Len() { joinAttributeMaps(newAttributeMap, ps.At(i).Attributes()) } } func addAttributesToSummaryDataPoints(ps pmetric.SummaryDataPointSlice, newAttributeMap pcommon.Map) { - for i := 0; i < ps.Len(); i++ { + for i := range ps.Len() { joinAttributeMaps(newAttributeMap, ps.At(i).Attributes()) } } func addAttributesToExponentialHistogramDataPoints(ps pmetric.ExponentialHistogramDataPointSlice, newAttributeMap pcommon.Map) { - for i := 0; i < ps.Len(); i++ { + for i := range ps.Len() { joinAttributeMaps(newAttributeMap, ps.At(i).Attributes()) } } diff --git a/pkg/resourcetotelemetry/resource_to_telemetry_test.go b/pkg/resourcetotelemetry/resource_to_telemetry_test.go index 6587d71ad86a8..6c25d203f4831 100644 --- a/pkg/resourcetotelemetry/resource_to_telemetry_test.go +++ b/pkg/resourcetotelemetry/resource_to_telemetry_test.go @@ -104,7 +104,7 @@ func BenchmarkJoinAttributes(b *testing.B) { b.Run(tt.name, func(b 
*testing.B) { b.ResetTimer() from := initMetricAttributes(tt.args.from, 0) - for i := 0; i < b.N; i++ { + for range b.N { to := initMetricAttributes(tt.args.to, tt.args.from) joinAttributeMaps(from, to) } @@ -115,7 +115,7 @@ func BenchmarkJoinAttributes(b *testing.B) { func initMetricAttributes(capacity int, idx int) pcommon.Map { dest := pcommon.NewMap() dest.EnsureCapacity(capacity) - for i := 0; i < capacity; i++ { + for i := range capacity { dest.PutStr(fmt.Sprintf("label-name-for-index-%d", i+idx), fmt.Sprintf("label-value-for-index-%d", i+idx)) } return dest diff --git a/pkg/sampling/encoding_test.go b/pkg/sampling/encoding_test.go index d0553a6923644..1f28ec5e5c4ca 100644 --- a/pkg/sampling/encoding_test.go +++ b/pkg/sampling/encoding_test.go @@ -287,7 +287,7 @@ func BenchmarkThresholdCompareAsUint64(b *testing.B) { b.ResetTimer() yes := 0 no := 0 - for i := 0; i < b.N; i++ { + for i := range b.N { idx := i % len(tids) tid := tids[idx] comp := comps[idx] diff --git a/pkg/stanza/adapter/benchmark_test.go b/pkg/stanza/adapter/benchmark_test.go index 1d80b5a7b6231..56e30871a5e4e 100644 --- a/pkg/stanza/adapter/benchmark_test.go +++ b/pkg/stanza/adapter/benchmark_test.go @@ -51,7 +51,7 @@ type benchCase struct { } func (bc benchCase) run(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { f := NewFactory(BenchReceiverType{}, component.StabilityLevelUndefined) cfg := f.CreateDefaultConfig().(*BenchConfig) cfg.BaseConfig.maxBatchSize = bc.maxBatchSize @@ -184,7 +184,7 @@ func (b *Input) Start(_ operator.Persister) error { b.wg.Add(1) go func() { defer b.wg.Done() - for n := 0; n < len(b.entries); n++ { + for n := range len(b.entries) { select { case <-ctx.Done(): return diff --git a/pkg/stanza/adapter/converter_test.go b/pkg/stanza/adapter/converter_test.go index 66846e0a3915a..d76accace3c13 100644 --- a/pkg/stanza/adapter/converter_test.go +++ b/pkg/stanza/adapter/converter_test.go @@ -22,7 +22,7 @@ func BenchmarkConvertSimple(b *testing.B) { ent := entry.New() b.StartTimer() - for i := 0; i < b.N; i++ { + for range b.N { convert(ent) } } @@ -32,14 +32,14 @@ func BenchmarkConvertComplex(b *testing.B) { ent := complexEntry() b.StartTimer() - for i := 0; i < b.N; i++ { + for range b.N { convert(ent) } } func complexEntriesForNDifferentHosts(count int, n int) []*entry.Entry { ret := make([]*entry.Entry, count) - for i := 0; i < count; i++ { + for i := range count { e := entry.New() e.Severity = entry.Error e.Resource = map[string]any{ @@ -83,8 +83,8 @@ func complexEntriesForNDifferentHosts(count int, n int) []*entry.Entry { func complexEntriesForNDifferentHostsMDifferentScopes(count int, n int, m int) []*entry.Entry { ret := make([]*entry.Entry, count) - for i := 0; i < count; i++ { - for j := 0; j < m; j++ { + for i := range count { + for range m { e := entry.New() e.Severity = entry.Error e.Resource = map[string]any{ @@ -391,7 +391,7 @@ func TestAllConvertedEntriesScopeGrouping(t *testing.T) { ills := rLog.ScopeLogs() require.Equal(t, ills.Len(), tc.numberOFScopes) - for i := 0; i < tc.numberOFScopes; i++ { + for i := range tc.numberOFScopes { sl := ills.At(i) require.Equal(t, sl.Scope().Name(), fmt.Sprintf("scope-%d", i%tc.numberOFScopes)) require.Equal(t, sl.LogRecords().Len(), tc.logsPerScope) @@ -796,7 +796,7 @@ func BenchmarkConverter(b *testing.B) { for _, wc := range workerCounts { b.Run(fmt.Sprintf("worker_count=%d", wc), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { b.ReportAllocs() for from := 0; from < entryCount; from += int(batchSize) { @@ 
-818,7 +818,7 @@ func BenchmarkGetResourceID(b *testing.B) { res := getResource() b.ReportAllocs() b.StartTimer() - for i := 0; i < b.N; i++ { + for range b.N { HashResource(res) } } @@ -827,7 +827,7 @@ func BenchmarkGetResourceIDEmptyResource(b *testing.B) { res := map[string]any{} b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { HashResource(res) } } @@ -838,7 +838,7 @@ func BenchmarkGetResourceIDSingleResource(b *testing.B) { } b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { HashResource(res) } } @@ -853,7 +853,7 @@ func BenchmarkGetResourceIDComplexResource(b *testing.B) { } b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { HashResource(res) } } diff --git a/pkg/stanza/adapter/frompdataconverter.go b/pkg/stanza/adapter/frompdataconverter.go index f294cdb3a03e1..75be2e216cb9f 100644 --- a/pkg/stanza/adapter/frompdataconverter.go +++ b/pkg/stanza/adapter/frompdataconverter.go @@ -75,7 +75,7 @@ func NewFromPdataConverter(set component.TelemetrySettings, workerCount int) *Fr func (c *FromPdataConverter) Start() { c.set.Logger.Debug("Starting log converter from pdata", zap.Int("worker_count", cap(c.workerChan))) - for i := 0; i < cap(c.workerChan); i++ { + for range cap(c.workerChan) { c.wg.Add(1) go c.workerLoop() } @@ -127,9 +127,9 @@ func (c *FromPdataConverter) workerLoop() { // Batch takes in an set of plog.Logs and sends it to an available worker for processing. func (c *FromPdataConverter) Batch(pLogs plog.Logs) error { - for i := 0; i < pLogs.ResourceLogs().Len(); i++ { + for i := range pLogs.ResourceLogs().Len() { rls := pLogs.ResourceLogs().At(i) - for j := 0; j < rls.ScopeLogs().Len(); j++ { + for j := range rls.ScopeLogs().Len() { scope := rls.ScopeLogs().At(j) item := fromConverterWorkerItem{ Resource: rls.Resource(), @@ -151,7 +151,7 @@ func (c *FromPdataConverter) Batch(pLogs plog.Logs) error { // convertFromLogs converts the contents of a fromConverterWorkerItem into a slice of entry.Entry func convertFromLogs(workerItem fromConverterWorkerItem) []*entry.Entry { result := make([]*entry.Entry, 0, workerItem.LogRecordSlice.Len()) - for i := 0; i < workerItem.LogRecordSlice.Len(); i++ { + for i := range workerItem.LogRecordSlice.Len() { record := workerItem.LogRecordSlice.At(i) entry := entry.Entry{} diff --git a/pkg/stanza/adapter/frompdataconverter_test.go b/pkg/stanza/adapter/frompdataconverter_test.go index 4e46a6ea5bca9..ace2d3458d6ff 100644 --- a/pkg/stanza/adapter/frompdataconverter_test.go +++ b/pkg/stanza/adapter/frompdataconverter_test.go @@ -32,7 +32,7 @@ func complexPdataForNDifferentHosts(count int, n int) plog.Logs { pLogs := plog.NewLogs() logs := pLogs.ResourceLogs() - for i := 0; i < count; i++ { + for i := range count { rls := logs.AppendEmpty() resource := rls.Resource() @@ -125,7 +125,7 @@ func BenchmarkFromPdataConverter(b *testing.B) { for _, wc := range workerCounts { b.Run(fmt.Sprintf("worker_count=%d", wc), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { converter := NewFromPdataConverter(componenttest.NewNopTelemetrySettings(), wc) converter.Start() defer converter.Stop() diff --git a/pkg/stanza/adapter/integration_test.go b/pkg/stanza/adapter/integration_test.go index 51d7676f355c7..7798fb3f40152 100644 --- a/pkg/stanza/adapter/integration_test.go +++ b/pkg/stanza/adapter/integration_test.go @@ -81,7 +81,7 @@ func BenchmarkEmitterToConsumer(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { cl.Reset() go func() { @@ 
-118,7 +118,7 @@ func BenchmarkEmitterToConsumerScopeGroupping(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { cl.Reset() go func() { diff --git a/pkg/stanza/adapter/receiver_test.go b/pkg/stanza/adapter/receiver_test.go index 1e08e9498e24e..1fe1115324c0a 100644 --- a/pkg/stanza/adapter/receiver_test.go +++ b/pkg/stanza/adapter/receiver_test.go @@ -249,7 +249,7 @@ func benchmarkReceiver(b *testing.B, logsPerIteration int) { require.NoError(b, rcv.Start(context.Background(), nil)) - for i := 0; i < b.N; i++ { + for range b.N { nextIteration <- struct{}{} <-iterationComplete mockConsumer.receivedLogs.Store(0) @@ -324,7 +324,7 @@ pipeline: // Populate the file that will be consumed file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0o666) require.NoError(b, err) - for i := 0; i < b.N; i++ { + for range b.N { _, err := file.WriteString("testlog\n") require.NoError(b, err) } @@ -386,7 +386,7 @@ func BenchmarkParseAndMap(b *testing.B) { // Populate the file that will be consumed file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0o666) require.NoError(b, err) - for i := 0; i < b.N; i++ { + for i := range b.N { _, err := file.WriteString(fmt.Sprintf("10.33.121.119 - - [11/Aug/2020:00:00:00 -0400] \"GET /index.html HTTP/1.1\" 404 %d\n", i%1000)) require.NoError(b, err) } @@ -459,7 +459,7 @@ func (t *testInputOperator) Start(_ operator.Persister) error { for { select { case <-t.nextIteration: - for i := 0; i < t.numberOfLogEntries; i++ { + for range t.numberOfLogEntries { _ = t.Write(context.Background(), e) } case <-ctx.Done(): diff --git a/pkg/stanza/fileconsumer/attrs/attrs_test.go b/pkg/stanza/fileconsumer/attrs/attrs_test.go index 097eb6e4bd094..fe0432bd317be 100644 --- a/pkg/stanza/fileconsumer/attrs/attrs_test.go +++ b/pkg/stanza/fileconsumer/attrs/attrs_test.go @@ -17,7 +17,7 @@ import ( func TestResolver(t *testing.T) { t.Parallel() - for i := 0; i < 64; i++ { + for i := range 64 { // Create a 6 bit string where each bit represents the value of a config option bitString := fmt.Sprintf("%06b", i) diff --git a/pkg/stanza/fileconsumer/benchmark_test.go b/pkg/stanza/fileconsumer/benchmark_test.go index 20ecfc980f150..4ed0b001be2db 100644 --- a/pkg/stanza/fileconsumer/benchmark_test.go +++ b/pkg/stanza/fileconsumer/benchmark_test.go @@ -156,7 +156,7 @@ func BenchmarkFileInput(b *testing.B) { // and to reduce the amount of syscalls in the benchmark. 
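// Illustrative sketch (hypothetical, not from this patch): in the benchmark
// hunks above the index is never used, so the rewrite drops the loop variable
// entirely: `for range b.N` instead of `for i := 0; i < b.N; i++`. Where the
// index is still needed (e.g. the i%1000 in BenchmarkParseAndMap), the patch
// keeps it as `for i := range b.N`. A self-contained example, to be placed in
// a *_test.go file; doWork is a hypothetical stand-in:
package bench

import "testing"

func doWork() int { return 42 }

func BenchmarkDoWork(b *testing.B) {
	b.ReportAllocs()
	for range b.N { // index unused, so no loop variable is declared
		_ = doWork()
	}
}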
uniqueLines := 10 severalLines := "" - for i := 0; i < uniqueLines; i++ { + for range uniqueLines { severalLines += string(filetest.TokenWithLength(999)) + "\n" } @@ -171,7 +171,7 @@ func BenchmarkFileInput(b *testing.B) { _, err := f.WriteString(f.Name() + "\n") require.NoError(b, err) // Write half the content before starting the benchmark - for i := 0; i < b.N/2; i++ { + for range b.N / 2 { _, err := f.WriteString(severalLines) require.NoError(b, err) } @@ -210,7 +210,7 @@ func BenchmarkFileInput(b *testing.B) { go func(f *os.File) { defer wg.Done() // Write the other half of the content while running - for i := 0; i < b.N/2; i++ { + for range b.N / 2 { _, err := f.WriteString(severalLines) assert.NoError(b, err) } @@ -222,7 +222,7 @@ func BenchmarkFileInput(b *testing.B) { } // Timer continues to run until all files have been read - for dones := 0; dones < len(files); dones++ { + for range len(files) { <-doneChan } wg.Wait() diff --git a/pkg/stanza/fileconsumer/file_test.go b/pkg/stanza/fileconsumer/file_test.go index 78c3b6789576e..7b9540fa84c1e 100644 --- a/pkg/stanza/fileconsumer/file_test.go +++ b/pkg/stanza/fileconsumer/file_test.go @@ -664,19 +664,19 @@ func TestMultiFileParallel_PreloadedFiles(t *testing.T) { numMessages := 100 expected := make([][]byte, 0, numFiles*numMessages) - for i := 0; i < numFiles; i++ { - for j := 0; j < numMessages; j++ { + for i := range numFiles { + for j := range numMessages { expected = append(expected, []byte(getMessage(i, j))) } } var wg sync.WaitGroup - for i := 0; i < numFiles; i++ { + for i := range numFiles { temp := filetest.OpenTemp(t, tempDir) wg.Add(1) go func(tf *os.File, f int) { defer wg.Done() - for j := 0; j < numMessages; j++ { + for j := range numMessages { filetest.WriteString(t, tf, getMessage(f, j)+"\n") } }(temp, i) @@ -705,8 +705,8 @@ func TestMultiFileParallel_LiveFiles(t *testing.T) { numMessages := 100 expected := make([][]byte, 0, numFiles*numMessages) - for i := 0; i < numFiles; i++ { - for j := 0; j < numMessages; j++ { + for i := range numFiles { + for j := range numMessages { expected = append(expected, []byte(getMessage(i, j))) } } @@ -717,7 +717,7 @@ func TestMultiFileParallel_LiveFiles(t *testing.T) { }() temps := make([]*os.File, 0, numFiles) - for i := 0; i < numFiles; i++ { + for range numFiles { temps = append(temps, filetest.OpenTemp(t, tempDir)) } @@ -726,7 +726,7 @@ func TestMultiFileParallel_LiveFiles(t *testing.T) { wg.Add(1) go func(tf *os.File, f int) { defer wg.Done() - for j := 0; j < numMessages; j++ { + for j := range numMessages { filetest.WriteString(t, tf, getMessage(f, j)+"\n") } }(temp, i) @@ -799,7 +799,7 @@ func TestManyLogsDelivered(t *testing.T) { count := 1000 expectedTokens := make([]string, 0, count) - for i := 0; i < count; i++ { + for i := range count { expectedTokens = append(expectedTokens, strconv.Itoa(i)) } @@ -842,14 +842,14 @@ func TestFileBatching(t *testing.T) { operator.persister = testutil.NewUnscopedMockPersister() temps := make([]*os.File, 0, files) - for i := 0; i < files; i++ { + for range files { temps = append(temps, filetest.OpenTemp(t, tempDir)) } // Write logs to each file expectedTokens := make([][]byte, 0, files*linesPerFile) for i, temp := range temps { - for j := 0; j < linesPerFile; j++ { + for j := range linesPerFile { message := fmt.Sprintf("%s %d %d", filetest.TokenWithLength(100), i, j) _, err := temp.WriteString(message + "\n") require.NoError(t, err) @@ -866,7 +866,7 @@ func TestFileBatching(t *testing.T) { // Write more logs to each file so we can validate 
that all files are still known expectedTokens = make([][]byte, 0, files*linesPerFile) for i, temp := range temps { - for j := 0; j < linesPerFile; j++ { + for j := range linesPerFile { message := fmt.Sprintf("%s %d %d", filetest.TokenWithLength(20), i, j) _, err := temp.WriteString(message + "\n") require.NoError(t, err) @@ -897,7 +897,7 @@ func TestFileBatchingRespectsStartAtEnd(t *testing.T) { operator.persister = testutil.NewUnscopedMockPersister() temps := make([]*os.File, 0, initFiles+moreFiles) - for i := 0; i < initFiles; i++ { + for range initFiles { temps = append(temps, filetest.OpenTemp(t, tempDir)) } @@ -913,7 +913,7 @@ func TestFileBatchingRespectsStartAtEnd(t *testing.T) { sink.ExpectNoCalls(t) // Create some more files - for i := 0; i < moreFiles; i++ { + for range moreFiles { temps = append(temps, filetest.OpenTemp(t, tempDir)) } @@ -1023,14 +1023,14 @@ func TestDeleteAfterRead(t *testing.T) { tempDir := t.TempDir() temps := make([]*os.File, 0, files) - for i := 0; i < files; i++ { + for range files { temps = append(temps, filetest.OpenTemp(t, tempDir)) } expectedTokens := make([][]byte, 0, totalLines) actualTokens := make([][]byte, 0, totalLines) for i, temp := range temps { - for j := 0; j < linesPerFile; j++ { + for j := range linesPerFile { line := filetest.TokenWithLength(100) message := fmt.Sprintf("%s %d %d", line, i, j) _, err := temp.WriteString(message + "\n") @@ -1060,14 +1060,14 @@ func TestDeleteAfterRead(t *testing.T) { // Make more files to ensure deleted files do not cause problems on next poll temps = make([]*os.File, 0, files) - for i := 0; i < files; i++ { + for range files { temps = append(temps, filetest.OpenTemp(t, tempDir)) } expectedTokens = make([][]byte, 0, totalLines) actualTokens = make([][]byte, 0, totalLines) for i, temp := range temps { - for j := 0; j < linesPerFile; j++ { + for j := range linesPerFile { line := filetest.TokenWithLength(200) message := fmt.Sprintf("%s %d %d", line, i, j) _, err := temp.WriteString(message + "\n") @@ -1109,14 +1109,14 @@ func TestMaxBatching(t *testing.T) { operator.persister = testutil.NewUnscopedMockPersister() temps := make([]*os.File, 0, files) - for i := 0; i < files; i++ { + for range files { temps = append(temps, filetest.OpenTemp(t, tempDir)) } // Write logs to each file numExpectedTokens := expectedMaxFilesPerPoll * linesPerFile for i, temp := range temps { - for j := 0; j < linesPerFile; j++ { + for j := range linesPerFile { message := fmt.Sprintf("%s %d %d", filetest.TokenWithLength(100), i, j) _, err := temp.WriteString(message + "\n") require.NoError(t, err) @@ -1131,7 +1131,7 @@ func TestMaxBatching(t *testing.T) { // Write more logs to each file so we can validate that all files are still known for i, temp := range temps { - for j := 0; j < linesPerFile; j++ { + for j := range linesPerFile { message := fmt.Sprintf("%s %d %d", filetest.TokenWithLength(20), i, j) _, err := temp.WriteString(message + "\n") require.NoError(t, err) @@ -1194,7 +1194,7 @@ func TestDeleteAfterRead_SkipPartials(t *testing.T) { require.NoError(t, shortFile.Close()) longFile := filetest.OpenTemp(t, tempDir) - for line := 0; line < longFileLines; line++ { + for range longFileLines { _, err := longFile.WriteString(string(filetest.TokenWithLength(100)) + "\n") require.NoError(t, err) } @@ -1507,7 +1507,7 @@ func TestNoTracking(t *testing.T) { func symlinkTestCreateLogFile(t *testing.T, tempDir string, fileIdx, numLogLines int) (tokens [][]byte) { logFilePath := fmt.Sprintf("%s/%d.log", tempDir, fileIdx) temp1 := 
filetest.OpenFile(t, logFilePath) - for i := 0; i < numLogLines; i++ { + for i := range numLogLines { msg := fmt.Sprintf("[fileIdx %2d] This is a simple log line with the number %3d", fileIdx, i) filetest.WriteString(t, temp1, msg+"\n") tokens = append(tokens, []byte(msg)) diff --git a/pkg/stanza/fileconsumer/internal/checkpoint/checkpoint.go b/pkg/stanza/fileconsumer/internal/checkpoint/checkpoint.go index 4727500b61f03..27a9d6c738d57 100644 --- a/pkg/stanza/fileconsumer/internal/checkpoint/checkpoint.go +++ b/pkg/stanza/fileconsumer/internal/checkpoint/checkpoint.go @@ -73,7 +73,7 @@ func LoadKey(ctx context.Context, persister operator.Persister, key string) ([]* // Decode each of the known files var errs []error rmds := make([]*reader.Metadata, 0, knownFileCount) - for i := 0; i < knownFileCount; i++ { + for range knownFileCount { rmd := new(reader.Metadata) if err = dec.Decode(rmd); err != nil { return nil, err diff --git a/pkg/stanza/fileconsumer/internal/emittest/sink.go b/pkg/stanza/fileconsumer/internal/emittest/sink.go index 34d0f7f95219a..96c6cc21a7198 100644 --- a/pkg/stanza/fileconsumer/internal/emittest/sink.go +++ b/pkg/stanza/fileconsumer/internal/emittest/sink.go @@ -70,7 +70,7 @@ func (s *Sink) NextToken(t *testing.T) []byte { func (s *Sink) NextTokens(t *testing.T, n int) [][]byte { emitChan := make([][]byte, 0, n) - for i := 0; i < n; i++ { + for range n { select { case token := <-s.emitChan: emitChan = append(emitChan, token.Body) @@ -103,7 +103,7 @@ func (s *Sink) ExpectToken(t *testing.T, expected []byte) { func (s *Sink) ExpectTokens(t *testing.T, expected ...[]byte) { actual := make([][]byte, 0, len(expected)) - for i := 0; i < len(expected); i++ { + for i := range len(expected) { select { case token := <-s.emitChan: actual = append(actual, token.Body) @@ -127,7 +127,7 @@ func (s *Sink) ExpectCall(t *testing.T, expected []byte, attrs map[string]any) { func (s *Sink) ExpectCalls(t *testing.T, expected ...emit.Token) { actual := make([]emit.Token, 0, len(expected)) - for i := 0; i < len(expected); i++ { + for i := range len(expected) { select { case call := <-s.emitChan: actual = append(actual, call) diff --git a/pkg/stanza/fileconsumer/internal/emittest/sink_test.go b/pkg/stanza/fileconsumer/internal/emittest/sink_test.go index a246b16b58204..03ec4437470c6 100644 --- a/pkg/stanza/fileconsumer/internal/emittest/sink_test.go +++ b/pkg/stanza/fileconsumer/internal/emittest/sink_test.go @@ -38,7 +38,7 @@ func TestNextTokenTimeout(t *testing.T) { func TestNextTokens(t *testing.T) { s, testCalls := sinkTest(t) - for i := 0; i < 5; i++ { + for i := range 5 { tokens := s.NextTokens(t, 2) assert.Equal(t, testCalls[2*i].Body, tokens[0]) assert.Equal(t, testCalls[2*i+1].Body, tokens[1]) @@ -47,7 +47,7 @@ func TestNextTokens(t *testing.T) { func TestNextTokensTimeout(t *testing.T) { s, testCalls := sinkTest(t, WithTimeout(10*time.Millisecond)) - for i := 0; i < 5; i++ { + for i := range 5 { tokens := s.NextTokens(t, 2) assert.Equal(t, testCalls[2*i].Body, tokens[0]) assert.Equal(t, testCalls[2*i+1].Body, tokens[1]) @@ -103,14 +103,14 @@ func TestExpectTokenTimeout(t *testing.T) { func TestExpectTokens(t *testing.T) { s, testCalls := sinkTest(t) - for i := 0; i < 5; i++ { + for i := range 5 { s.ExpectTokens(t, testCalls[2*i].Body, testCalls[2*i+1].Body) } } func TestExpectTokensTimeout(t *testing.T) { s, testCalls := sinkTest(t, WithTimeout(10*time.Millisecond)) - for i := 0; i < 5; i++ { + for i := range 5 { s.ExpectTokens(t, testCalls[2*i].Body, testCalls[2*i+1].Body) } @@ 
-186,7 +186,7 @@ func TestExpectNoCallsFailure(t *testing.T) { func TestWithCallBuffer(t *testing.T) { s, testCalls := sinkTest(t, WithCallBuffer(5)) - for i := 0; i < 10; i++ { + for i := range 10 { s.ExpectCall(t, testCalls[i].Body, testCalls[i].Attributes) } } @@ -194,7 +194,7 @@ func TestWithCallBuffer(t *testing.T) { func sinkTest(t *testing.T, opts ...SinkOpt) (*Sink, []emit.Token) { s := NewSink(opts...) testCalls := make([]emit.Token, 0, 10) - for i := 0; i < 10; i++ { + for i := range 10 { testCalls = append(testCalls, emit.Token{ Body: []byte(fmt.Sprintf("token-%d", i)), Attributes: map[string]any{ diff --git a/pkg/stanza/fileconsumer/internal/fingerprint/fingerprint.go b/pkg/stanza/fileconsumer/internal/fingerprint/fingerprint.go index a61346d8db1cd..4a713a9f2ef28 100644 --- a/pkg/stanza/fileconsumer/internal/fingerprint/fingerprint.go +++ b/pkg/stanza/fileconsumer/internal/fingerprint/fingerprint.go @@ -56,7 +56,7 @@ func (f Fingerprint) Equal(other *Fingerprint) bool { if l0 != l1 { return false } - for i := 0; i < l0; i++ { + for i := range l0 { if other.firstBytes[i] != f.firstBytes[i] { return false } diff --git a/pkg/stanza/fileconsumer/internal/reader/reader_test.go b/pkg/stanza/fileconsumer/internal/reader/reader_test.go index b2bbcc3f1d5d3..3a5008d05a67e 100644 --- a/pkg/stanza/fileconsumer/internal/reader/reader_test.go +++ b/pkg/stanza/fileconsumer/internal/reader/reader_test.go @@ -340,7 +340,7 @@ func BenchmarkFileRead(b *testing.B) { _, err := temp.WriteString(temp.Name() + "\n") require.NoError(b, err) // Write half the content before starting the benchmark - for i := 0; i < 100; i++ { + for range 100 { _, err := temp.WriteString(string(filetest.TokenWithLength(999)) + "\n") require.NoError(b, err) } @@ -355,7 +355,7 @@ func BenchmarkFileRead(b *testing.B) { }) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { file, err := os.OpenFile(temp.Name(), os.O_CREATE|os.O_RDWR, 0o600) require.NoError(b, err) fp, err := f.NewFingerprint(file) diff --git a/pkg/stanza/fileconsumer/internal/tracker/tracker.go b/pkg/stanza/fileconsumer/internal/tracker/tracker.go index c7e6e897f0e3a..beac72a9863fd 100644 --- a/pkg/stanza/fileconsumer/internal/tracker/tracker.go +++ b/pkg/stanza/fileconsumer/internal/tracker/tracker.go @@ -98,7 +98,7 @@ func (t *fileTracker) GetOpenFile(fp *fingerprint.Fingerprint) *reader.Reader { } func (t *fileTracker) GetClosedFile(fp *fingerprint.Fingerprint) *reader.Metadata { - for i := 0; i < len(t.knownFiles); i++ { + for i := range len(t.knownFiles) { if oldMetadata := t.knownFiles[i].Match(fp, fileset.StartsWith); oldMetadata != nil { return oldMetadata } @@ -154,7 +154,7 @@ func (t *fileTracker) EndPoll() { func (t *fileTracker) TotalReaders() int { total := t.previousPollFiles.Len() - for i := 0; i < len(t.knownFiles); i++ { + for i := range len(t.knownFiles) { total += t.knownFiles[i].Len() } return total @@ -209,7 +209,7 @@ func (t *fileTracker) rewriteArchive(ctx context.Context, previousPollsToArchive // Refer archive.md for the detailed design if mod(t.archiveIndex-1, previousPollsToArchive) > t.pollsToArchive { - for i := 0; i < t.pollsToArchive; i++ { + for i := range t.pollsToArchive { if err := rewrite(i, leastRecentIndex); err != nil { t.set.Logger.Error("error while swapping archive", zap.Error(err)) } @@ -221,7 +221,7 @@ func (t *fileTracker) rewriteArchive(ctx context.Context, previousPollsToArchive // If the current index points at an unset key, no need to do anything return } - for i := 0; i < 
t.pollsToArchive-t.archiveIndex; i++ { + for i := range t.pollsToArchive - t.archiveIndex { if err := rewrite(t.archiveIndex+i, leastRecentIndex); err != nil { t.set.Logger.Warn("error while swapping archive", zap.Error(err)) } @@ -328,7 +328,7 @@ func (t *fileTracker) FindFiles(fps []*fingerprint.Fingerprint) []*reader.Metada matchedMetadata := make([]*reader.Metadata, len(fps)) // continue executing the loop until either all records are matched or all archive sets have been processed. - for i := 0; i < t.pollsToArchive; i++ { + for range t.pollsToArchive { // Update the mostRecentIndex nextIndex = (nextIndex - 1 + t.pollsToArchive) % t.pollsToArchive diff --git a/pkg/stanza/fileconsumer/internal/tracker/tracker_test.go b/pkg/stanza/fileconsumer/internal/tracker/tracker_test.go index 96329055e5888..2237631c48f14 100644 --- a/pkg/stanza/fileconsumer/internal/tracker/tracker_test.go +++ b/pkg/stanza/fileconsumer/internal/tracker/tracker_test.go @@ -23,7 +23,7 @@ import ( func TestFindFilesOrder(t *testing.T) { fps := make([]*fingerprint.Fingerprint, 0) - for i := 0; i < 100; i++ { + for range 100 { fps = append(fps, fingerprint.New([]byte(uuid.NewString()))) } persister := testutil.NewUnscopedMockPersister() @@ -34,7 +34,7 @@ func TestFindFilesOrder(t *testing.T) { require.Equal(t, len(fps), len(matchables), "return slice should be of same length as input slice") - for i := 0; i < len(matchables); i++ { + for i := range len(matchables) { if fpInStorage[i] { // if current fingerprint is present in storage, the corresponding return type should not be nil require.NotNilf(t, matchables[i], "resulting index %d should be not be nil type", i) @@ -55,7 +55,7 @@ func TestIndexInBounds(t *testing.T) { require.Equal(t, 0, tracker.archiveIndex) // run archiving. Each time, index should be in bound. - for i := 0; i < 1099; i++ { + for i := range 1099 { require.Equalf(t, i%pollsToArchive, tracker.archiveIndex, "Index should %d, but was %d", i%pollsToArchive, tracker.archiveIndex) tracker.archive(&fileset.Fileset[*reader.Metadata]{}) require.Truef(t, tracker.archiveIndex >= 0 && tracker.archiveIndex < pollsToArchive, "Index should be between 0 and %d, but was %d", pollsToArchive, tracker.archiveIndex) @@ -96,7 +96,7 @@ func testArchiveRestoration(t *testing.T, pollsToArchive int, newPollsToArchive persister := testutil.NewUnscopedMockPersister() tracker := NewFileTracker(context.Background(), componenttest.NewNopTelemetrySettings(), 0, pollsToArchive, persister).(*fileTracker) iterations := int(pct * float32(pollsToArchive)) - for i := 0; i < iterations; i++ { + for i := range iterations { fileset := &fileset.Fileset[*reader.Metadata]{} fileset.Add(&reader.Metadata{ // for the sake of this test case. 
@@ -106,7 +106,7 @@ func testArchiveRestoration(t *testing.T, pollsToArchive int, newPollsToArchive tracker.archive(fileset) } // make sure all keys are present in persister - for i := 0; i < iterations; i++ { + for i := range iterations { archiveIndex := i % pollsToArchive val, err := persister.Get(context.Background(), archiveKey(archiveIndex)) require.NoError(t, err) @@ -114,7 +114,7 @@ func testArchiveRestoration(t *testing.T, pollsToArchive int, newPollsToArchive } // also, make sure we have not written "extra" stuff (for partially filled archive) count := 0 - for i := 0; i < pollsToArchive; i++ { + for i := range pollsToArchive { val, err := persister.Get(context.Background(), archiveKey(i)) require.NoError(t, err) if val != nil { @@ -128,7 +128,7 @@ func testArchiveRestoration(t *testing.T, pollsToArchive int, newPollsToArchive // start from most recent element startIdx := mod(tracker.archiveIndex-1, newPollsToArchive) mostRecentIteration := iterations - 1 - for i := 0; i < newPollsToArchive; i++ { + for range newPollsToArchive { val, err := tracker.readArchive(startIdx) require.NoError(t, err) if val.Len() > 0 { diff --git a/pkg/stanza/fileconsumer/matcher/internal/filter/filter_test.go b/pkg/stanza/fileconsumer/matcher/internal/filter/filter_test.go index fb4a865026c54..f174cf7c3e224 100644 --- a/pkg/stanza/fileconsumer/matcher/internal/filter/filter_test.go +++ b/pkg/stanza/fileconsumer/matcher/internal/filter/filter_test.go @@ -93,7 +93,7 @@ func TestFilter(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { opts := make([]Option, 0, tc.numOpts) - for i := 0; i < tc.numOpts; i++ { + for range tc.numOpts { opts = append(opts, &removeFirst{}) } result, err := Filter(tc.values, regexp.MustCompile(tc.regex), opts...) 
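// Illustrative sketch (hypothetical, not from this patch): one semantic point
// worth noting about these rewrites is that the range form evaluates its bound
// exactly once, before the first iteration, whereas `i < x.Len()` or
// `i < len(x)` is re-evaluated on every pass. The two forms are therefore only
// equivalent when the length cannot change inside the loop, which holds for
// the read-only traversals this patch touches. Example:
package main

import "fmt"

func main() {
	s := []int{1, 2, 3}
	for i := range len(s) { // bound fixed at 3 before the loop starts
		if i == 0 {
			s = append(s, 99) // growing s does not extend the loop
		}
		fmt.Println(i) // prints 0, 1, 2 only
	}
}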
diff --git a/pkg/stanza/fileconsumer/rotation_test.go b/pkg/stanza/fileconsumer/rotation_test.go index bc8f067f535a7..da5bfc215bd10 100644 --- a/pkg/stanza/fileconsumer/rotation_test.go +++ b/pkg/stanza/fileconsumer/rotation_test.go @@ -44,9 +44,9 @@ func TestCopyTruncate(t *testing.T) { numRotations := 3 expected := make([][]byte, 0, numFiles*numMessages*numRotations) - for i := 0; i < numFiles; i++ { - for j := 0; j < numMessages; j++ { - for k := 0; k < numRotations; k++ { + for i := range numFiles { + for j := range numMessages { + for k := range numRotations { expected = append(expected, []byte(getMessage(i, k, j))) } } @@ -58,14 +58,14 @@ func TestCopyTruncate(t *testing.T) { }() var wg sync.WaitGroup - for fileNum := 0; fileNum < numFiles; fileNum++ { + for fileNum := range numFiles { wg.Add(1) go func(fn int) { defer wg.Done() file := filetest.OpenFile(t, baseFileName(fn)) - for rotationNum := 0; rotationNum < numRotations; rotationNum++ { - for messageNum := 0; messageNum < numMessages; messageNum++ { + for rotationNum := range numRotations { + for messageNum := range numMessages { filetest.WriteString(t, file, getMessage(fn, rotationNum, messageNum)+"\n") time.Sleep(10 * time.Millisecond) } @@ -105,9 +105,9 @@ func TestMoveCreate(t *testing.T) { numRotations := 3 expected := make([][]byte, 0, numFiles*numMessages*numRotations) - for i := 0; i < numFiles; i++ { - for j := 0; j < numMessages; j++ { - for k := 0; k < numRotations; k++ { + for i := range numFiles { + for j := range numMessages { + for k := range numRotations { expected = append(expected, []byte(getMessage(i, k, j))) } } @@ -119,14 +119,14 @@ func TestMoveCreate(t *testing.T) { }() var wg sync.WaitGroup - for fileNum := 0; fileNum < numFiles; fileNum++ { + for fileNum := range numFiles { wg.Add(1) go func(fn int) { defer wg.Done() - for rotationNum := 0; rotationNum < numRotations; rotationNum++ { + for rotationNum := range numRotations { file := filetest.OpenFile(t, baseFileName(fn)) - for messageNum := 0; messageNum < numMessages; messageNum++ { + for messageNum := range numMessages { filetest.WriteString(t, file, getMessage(fn, rotationNum, messageNum)+"\n") time.Sleep(10 * time.Millisecond) } diff --git a/pkg/stanza/operator/helper/emitter_test.go b/pkg/stanza/operator/helper/emitter_test.go index 927734dccf56b..99da169ca0574 100644 --- a/pkg/stanza/operator/helper/emitter_test.go +++ b/pkg/stanza/operator/helper/emitter_test.go @@ -176,7 +176,7 @@ func complexEntry() *entry.Entry { func complexEntriesForNDifferentHosts(count int, n int) []*entry.Entry { ret := make([]*entry.Entry, count) - for i := 0; i < count; i++ { + for i := range count { e := entry.New() e.Severity = entry.Error e.Resource = map[string]any{ diff --git a/pkg/stanza/operator/helper/expr_string.go b/pkg/stanza/operator/helper/expr_string.go index 9ec71be8c0258..e3745c9f17219 100644 --- a/pkg/stanza/operator/helper/expr_string.go +++ b/pkg/stanza/operator/helper/expr_string.go @@ -109,7 +109,7 @@ type ExprString struct { // Render will render an ExprString as a string func (e *ExprString) Render(env map[string]any) (string, error) { var b strings.Builder - for i := 0; i < len(e.SubExprs); i++ { + for i := range len(e.SubExprs) { b.WriteString(e.SubStrings[i]) out, err := vm.Run(e.SubExprs[i], env) if err != nil { diff --git a/pkg/stanza/operator/helper/ip_resolver_test.go b/pkg/stanza/operator/helper/ip_resolver_test.go index 7f4c82eec3d0e..9dd187ebc39c2 100644 --- a/pkg/stanza/operator/helper/ip_resolver_test.go +++ 
b/pkg/stanza/operator/helper/ip_resolver_test.go @@ -43,7 +43,7 @@ func TestIPResolver100Hits(t *testing.T) { expireTime: time.Now().Add(time.Hour), } - for i := 0; i < 100; i++ { + for range 100 { require.Equal(t, "definitely invalid hostname", resolver.GetHostFromIP("127.0.0.1")) } resolver.Stop() diff --git a/pkg/stanza/operator/input/generate/input_test.go b/pkg/stanza/operator/input/generate/input_test.go index 7a3156562b6bb..1373e9b642f16 100644 --- a/pkg/stanza/operator/input/generate/input_test.go +++ b/pkg/stanza/operator/input/generate/input_test.go @@ -35,7 +35,7 @@ func TestInputGenerate(t *testing.T) { require.NoError(t, op.Stop()) }() - for i := 0; i < 5; i++ { + for range 5 { fake.ExpectBody(t, "test message") } } diff --git a/pkg/stanza/operator/input/tcp/input_test.go b/pkg/stanza/operator/input/tcp/input_test.go index 109533209be1f..9cfa0e5d6677b 100644 --- a/pkg/stanza/operator/input/tcp/input_test.go +++ b/pkg/stanza/operator/input/tcp/input_test.go @@ -456,7 +456,7 @@ func BenchmarkTCPInput(b *testing.B) { } }() - for i := 0; i < b.N; i++ { + for range b.N { <-fakeOutput.Received } diff --git a/pkg/stanza/operator/input/udp/input.go b/pkg/stanza/operator/input/udp/input.go index bd70196bbd095..7ffa4ec2f2ddb 100644 --- a/pkg/stanza/operator/input/udp/input.go +++ b/pkg/stanza/operator/input/udp/input.go @@ -72,12 +72,12 @@ func (i *Input) goHandleMessages(ctx context.Context) { return } - for n := 0; n < i.AsyncConfig.Readers; n++ { + for range i.AsyncConfig.Readers { i.wgReader.Add(1) go i.readMessagesAsync(ctx) } - for n := 0; n < i.AsyncConfig.Processors; n++ { + for range i.AsyncConfig.Processors { i.wg.Add(1) go i.processMessagesAsync(ctx) } diff --git a/pkg/stanza/operator/input/udp/input_test.go b/pkg/stanza/operator/input/udp/input_test.go index 8c7cb40eb44a6..4bf90c4a78e74 100644 --- a/pkg/stanza/operator/input/udp/input_test.go +++ b/pkg/stanza/operator/input/udp/input_test.go @@ -247,7 +247,7 @@ func BenchmarkUDPInput(b *testing.B) { } }() - for i := 0; i < b.N; i++ { + for range b.N { <-fakeOutput.Received } diff --git a/pkg/stanza/operator/parser/json/parser_test.go b/pkg/stanza/operator/parser/json/parser_test.go index 4563b4ded8166..c7867ecaab73d 100644 --- a/pkg/stanza/operator/parser/json/parser_test.go +++ b/pkg/stanza/operator/parser/json/parser_test.go @@ -304,7 +304,7 @@ func benchmarkOperator(b *testing.B, parser operator.Operator) { e := entry.Entry{Body: string(body)} - for i := 0; i < b.N; i++ { + for range b.N { err := parser.Process(context.Background(), &e) require.NoError(b, err) } diff --git a/pkg/stanza/operator/parser/regex/parser_test.go b/pkg/stanza/operator/parser/regex/parser_test.go index 80ba837ed1b50..51b2aa47c4dce 100644 --- a/pkg/stanza/operator/parser/regex/parser_test.go +++ b/pkg/stanza/operator/parser/regex/parser_test.go @@ -267,7 +267,7 @@ func benchmarkParse(b *testing.B, parser *Parser, input []string) { // No cache func BenchmarkParseNoCache(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 0) - for n := 0; n < b.N; n++ { + for range b.N { benchmarkParseThreaded(b, parser, benchParsePatterns) } } @@ -275,7 +275,7 @@ func BenchmarkParseNoCache(b *testing.B) { // Memory cache at capacity func BenchmarkParseWithMemoryCache(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 100) - for n := 0; n < b.N; n++ { + for range b.N { benchmarkParseThreaded(b, parser, benchParsePatterns) } } @@ -283,7 +283,7 @@ func BenchmarkParseWithMemoryCache(b *testing.B) { // Memory cache over capacity by one func 
BenchmarkParseWithMemoryCacheFullByOne(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 99) - for n := 0; n < b.N; n++ { + for range b.N { benchmarkParseThreaded(b, parser, benchParsePatterns) } } @@ -291,7 +291,7 @@ func BenchmarkParseWithMemoryCacheFullByOne(b *testing.B) { // Memory cache over capacity by 10 func BenchmarkParseWithMemoryCacheFullBy10(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 90) - for n := 0; n < b.N; n++ { + for range b.N { benchmarkParseThreaded(b, parser, benchParsePatterns) } } @@ -299,7 +299,7 @@ func BenchmarkParseWithMemoryCacheFullBy10(b *testing.B) { // Memory cache over capacity by 50 func BenchmarkParseWithMemoryCacheFullBy50(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 50) - for n := 0; n < b.N; n++ { + for range b.N { benchmarkParseThreaded(b, parser, benchParsePatterns) } } @@ -307,7 +307,7 @@ func BenchmarkParseWithMemoryCacheFullBy50(b *testing.B) { // Memory cache over capacity by 90 func BenchmarkParseWithMemoryCacheFullBy90(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 10) - for n := 0; n < b.N; n++ { + for range b.N { benchmarkParseThreaded(b, parser, benchParsePatterns) } } @@ -315,7 +315,7 @@ func BenchmarkParseWithMemoryCacheFullBy90(b *testing.B) { // Memory cache over capacity by 99 func BenchmarkParseWithMemoryCacheFullBy99(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 1) - for n := 0; n < b.N; n++ { + for range b.N { benchmarkParseThreaded(b, parser, benchParsePatterns) } } @@ -323,7 +323,7 @@ func BenchmarkParseWithMemoryCacheFullBy99(b *testing.B) { // No cache one file func BenchmarkParseNoCacheOneFile(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 0) - for n := 0; n < b.N; n++ { + for range b.N { pattern := []string{benchParsePatterns[0]} benchmarkParse(b, parser, pattern) } @@ -332,7 +332,7 @@ func BenchmarkParseNoCacheOneFile(b *testing.B) { // Memory cache one file func BenchmarkParseWithMemoryCacheOneFile(b *testing.B) { parser := newTestBenchParser(&testing.T{}, 100) - for n := 0; n < b.N; n++ { + for range b.N { pattern := []string{benchParsePatterns[0]} benchmarkParse(b, parser, pattern) } diff --git a/pkg/stanza/operator/parser/uri/parser_test.go b/pkg/stanza/operator/parser/uri/parser_test.go index d0c856cae1e1b..312433934c052 100644 --- a/pkg/stanza/operator/parser/uri/parser_test.go +++ b/pkg/stanza/operator/parser/uri/parser_test.go @@ -217,7 +217,7 @@ func TestBuildParserURL(t *testing.T) { func BenchmarkParserParse(b *testing.B) { v := "https://dev:password@www.golang.org:8443/v1/app/stage?token=d9e28b1d-2c7b-4853-be6a-d94f34a5d4ab&env=prod&env=stage&token=c6fa29f9-a31b-4584-b98d-aa8473b0e18d®ion=us-east1b&mode=fast" parser := Parser{} - for n := 0; n < b.N; n++ { + for range b.N { if _, err := parser.parse(v); err != nil { b.Fatal(err) } diff --git a/pkg/stanza/operator/transformer/recombine/transformer_test.go b/pkg/stanza/operator/transformer/recombine/transformer_test.go index d2332fa045dc6..7767c81e49d30 100644 --- a/pkg/stanza/operator/transformer/recombine/transformer_test.go +++ b/pkg/stanza/operator/transformer/recombine/transformer_test.go @@ -757,8 +757,8 @@ func BenchmarkRecombine(b *testing.B) { sourcesNum := 10 logsNum := 10 entries := []*entry.Entry{} - for i := 0; i < logsNum; i++ { - for j := 0; j < sourcesNum; j++ { + for i := range logsNum { + for j := range sourcesNum { start := entry.New() start.Timestamp = time.Now() start.Body = strings.Repeat(fmt.Sprintf("log-%d", i), 50) @@ -770,7 +770,7 @@ func BenchmarkRecombine(b 
*testing.B) { ctx := context.Background() b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { for _, e := range entries { require.NoError(b, recombine.Process(ctx, e)) } @@ -809,7 +809,7 @@ func BenchmarkRecombineLimitTrigger(b *testing.B) { ctx := context.Background() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { require.NoError(b, recombine.Process(ctx, start)) require.NoError(b, recombine.Process(ctx, next)) require.NoError(b, recombine.Process(ctx, start)) diff --git a/pkg/stanza/pipeline/config_test.go b/pkg/stanza/pipeline/config_test.go index b2d916680e7a4..12f74d817f403 100644 --- a/pkg/stanza/pipeline/config_test.go +++ b/pkg/stanza/pipeline/config_test.go @@ -377,7 +377,7 @@ func TestUpdateOutputIDs(t *testing.T) { } require.Len(t, ops, expectedNumOps) - for i := 0; i < len(ops); i++ { + for i := range len(ops) { id := ops[i].ID() if id == "fake" { require.Nil(t, ops[i].GetOutputIDs()) diff --git a/pkg/stanza/split/splittest/splittest.go b/pkg/stanza/split/splittest/splittest.go index 56b122f181306..7adb701fe94af 100644 --- a/pkg/stanza/split/splittest/splittest.go +++ b/pkg/stanza/split/splittest/splittest.go @@ -122,7 +122,7 @@ func ScanLinesStrict(data []byte, atEOF bool) (advance int, token []byte, err er func GenerateBytes(length int) []byte { chars := []byte(`abcdefghijklmnopqrstuvwxyz`) newSlice := make([]byte, length) - for i := 0; i < length; i++ { + for i := range length { newSlice[i] = chars[i%len(chars)] } return newSlice diff --git a/pkg/stanza/testutil/mocks.go b/pkg/stanza/testutil/mocks.go index 185f5871d6c27..6ca23302dbdb3 100644 --- a/pkg/stanza/testutil/mocks.go +++ b/pkg/stanza/testutil/mocks.go @@ -117,7 +117,7 @@ func (f *FakeOutput) ExpectEntry(tb testing.TB, expected *entry.Entry) { // ExpectEntries expects that the given entries will be received in any order func (f *FakeOutput) ExpectEntries(tb testing.TB, expected []*entry.Entry) { entries := make([]*entry.Entry, 0, len(expected)) - for i := 0; i < len(expected); i++ { + for range len(expected) { select { case e := <-f.Received: entries = append(entries, e) diff --git a/pkg/translator/azure/resourcelogs_to_logs.go b/pkg/translator/azure/resourcelogs_to_logs.go index 5b608570612a9..5d0460d6303c0 100644 --- a/pkg/translator/azure/resourcelogs_to_logs.go +++ b/pkg/translator/azure/resourcelogs_to_logs.go @@ -105,7 +105,7 @@ func (r ResourceLogsUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { scopeLogs.Scope().SetVersion(r.Version) logRecords := scopeLogs.LogRecords() - for i := 0; i < len(logs); i++ { + for i := range len(logs) { log := logs[i] nanos, err := getTimestamp(log, r.TimeFormats...) if err != nil { diff --git a/pkg/translator/azurelogs/resourcelogs_to_logs.go b/pkg/translator/azurelogs/resourcelogs_to_logs.go index 437df40961631..4856fcc77a1e1 100644 --- a/pkg/translator/azurelogs/resourcelogs_to_logs.go +++ b/pkg/translator/azurelogs/resourcelogs_to_logs.go @@ -108,7 +108,7 @@ func (r ResourceLogsUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { scopeLogs.Scope().SetVersion(r.Version) logRecords := scopeLogs.LogRecords() - for i := 0; i < len(logs); i++ { + for i := range len(logs) { log := logs[i] nanos, err := getTimestamp(log, r.TimeFormats...) 
if err != nil { diff --git a/pkg/translator/jaeger/jaegerproto_to_traces_test.go b/pkg/translator/jaeger/jaegerproto_to_traces_test.go index 2317961c2586b..7cc99830c56f0 100644 --- a/pkg/translator/jaeger/jaegerproto_to_traces_test.go +++ b/pkg/translator/jaeger/jaegerproto_to_traces_test.go @@ -539,9 +539,9 @@ func TestProtoBatchesToInternalTraces(t *testing.T) { lenbatches := expected.ResourceSpans().Len() found := 0 - for i := 0; i < lenbatches; i++ { + for i := range lenbatches { rsExpected := expected.ResourceSpans().At(i) - for j := 0; j < lenbatches; j++ { + for j := range lenbatches { got.ResourceSpans().RemoveIf(func(_ ptrace.ResourceSpans) bool { nameExpected := rsExpected.ScopeSpans().At(0).Spans().At(0).Name() nameGot := got.ResourceSpans().At(j).ScopeSpans().At(0).Scope().Name() @@ -1061,7 +1061,7 @@ func BenchmarkProtoBatchToInternalTraces(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { _, err := ProtoToTraces(jb) assert.NoError(b, err) } diff --git a/pkg/translator/jaeger/jaegerthrift_to_traces_test.go b/pkg/translator/jaeger/jaegerthrift_to_traces_test.go index 92ab1f3923f38..1c2261ef43a7e 100644 --- a/pkg/translator/jaeger/jaegerthrift_to_traces_test.go +++ b/pkg/translator/jaeger/jaegerthrift_to_traces_test.go @@ -340,7 +340,7 @@ func BenchmarkThriftBatchToInternalTraces(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { _, err := ThriftToTraces(jb) assert.NoError(b, err) } diff --git a/pkg/translator/jaeger/traces_to_jaegerproto.go b/pkg/translator/jaeger/traces_to_jaegerproto.go index 4a8f7e6efe9e2..dfb51ed692944 100644 --- a/pkg/translator/jaeger/traces_to_jaegerproto.go +++ b/pkg/translator/jaeger/traces_to_jaegerproto.go @@ -23,7 +23,7 @@ func ProtoFromTraces(td ptrace.Traces) []*model.Batch { } batches := make([]*model.Batch, 0, resourceSpans.Len()) - for i := 0; i < resourceSpans.Len(); i++ { + for i := range resourceSpans.Len() { rs := resourceSpans.At(i) batch := resourceSpansToJaegerProto(rs) if batch != nil { @@ -54,10 +54,10 @@ func resourceSpansToJaegerProto(rs ptrace.ResourceSpans) *model.Batch { // instrumentation library info. 
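
The bulk of these hunks rewrite index loops over pdata slices, which expose Len()/At(i) accessors rather than being ordinary Go slices, so the mechanical `for i := range s.Len()` form is the natural end state of the intrange fix. A minimal, self-contained sketch of the before/after (the single-span setup below is hypothetical, not taken from the patch):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/ptrace"
    )

    func main() {
        td := ptrace.NewTraces()
        td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetName("span-0")
        spans := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans()

        // Classic form: the bound spans.Len() is re-evaluated on every iteration.
        for i := 0; i < spans.Len(); i++ {
            fmt.Println(spans.At(i).Name())
        }

        // Go 1.22 range-over-int form produced by the intrange fix: the bound is
        // evaluated once, which is equivalent here because the body only reads.
        for i := range spans.Len() {
            fmt.Println(spans.At(i).Name())
        }
    }
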
jSpans := make([]*model.Span, 0, ilss.At(0).Spans().Len()) - for i := 0; i < ilss.Len(); i++ { + for i := range ilss.Len() { ils := ilss.At(i) spans := ils.Spans() - for j := 0; j < spans.Len(); j++ { + for j := range spans.Len() { span := spans.At(j) jSpan := spanToJaegerProto(span, ils.Scope()) if jSpan != nil { @@ -256,7 +256,7 @@ func makeJaegerProtoReferences(links ptrace.SpanLinkSlice, parentSpanID model.Sp }) } - for i := 0; i < links.Len(); i++ { + for i := range links.Len() { link := links.At(i) linkTraceID := traceIDToJaegerProto(link.TraceID()) linkSpanID := spanIDToJaegerProto(link.SpanID()) @@ -282,7 +282,7 @@ func spanEventsToJaegerProtoLogs(events ptrace.SpanEventSlice) []model.Log { } logs := make([]model.Log, 0, events.Len()) - for i := 0; i < events.Len(); i++ { + for i := range events.Len() { event := events.At(i) fields := make([]model.KeyValue, 0, event.Attributes().Len()+1) _, eventAttrFound := event.Attributes().Get(eventNameAttr) diff --git a/pkg/translator/jaeger/traces_to_jaegerproto_test.go b/pkg/translator/jaeger/traces_to_jaegerproto_test.go index 26d7f39ba889c..35e4fc5fba72b 100644 --- a/pkg/translator/jaeger/traces_to_jaegerproto_test.go +++ b/pkg/translator/jaeger/traces_to_jaegerproto_test.go @@ -374,7 +374,7 @@ func BenchmarkInternalTracesToJaegerProto(b *testing.B) { resource.CopyTo(td.ResourceSpans().At(0).Resource()) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { batches := ProtoFromTraces(td) assert.NotEmpty(b, batches) } diff --git a/pkg/translator/loki/convert.go b/pkg/translator/loki/convert.go index 474d5084e4ce8..998d72accedd5 100644 --- a/pkg/translator/loki/convert.go +++ b/pkg/translator/loki/convert.go @@ -104,7 +104,7 @@ func getAttribute(attr string, attributes pcommon.Map) (pcommon.Value, bool) { // perhaps it's a nested attribute? segments := strings.Split(attr, attrSeparator) segmentsNumber := len(segments) - for i := 0; i < segmentsNumber-1; i++ { + for i := range segmentsNumber - 1 { left := strings.Join(segments[:segmentsNumber-i-1], attrSeparator) right := strings.Join(segments[segmentsNumber-i-1:], attrSeparator) diff --git a/pkg/translator/loki/encode.go b/pkg/translator/loki/encode.go index 90721954a437b..57d37b37ae570 100644 --- a/pkg/translator/loki/encode.go +++ b/pkg/translator/loki/encode.go @@ -189,7 +189,7 @@ func valueToKeyvals(key string, value pcommon.Value) []any { prefix = key + "_" } var keyvals []any - for i := 0; i < value.Slice().Len(); i++ { + for i := range value.Slice().Len() { v := value.Slice().At(i) keyvals = append(keyvals, valueToKeyvals(fmt.Sprintf("%s%d", prefix, i), v)...) 
} diff --git a/pkg/translator/loki/logs_to_loki.go b/pkg/translator/loki/logs_to_loki.go index 9a5ddc11b071c..03c1db44f960d 100644 --- a/pkg/translator/loki/logs_to_loki.go +++ b/pkg/translator/loki/logs_to_loki.go @@ -51,14 +51,14 @@ func LogsToLokiRequests(ld plog.Logs, defaultLabelsEnabled map[string]bool) map[ groups := map[string]pushRequestGroup{} rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { ills := rls.At(i).ScopeLogs() resource := rls.At(i).Resource() - for j := 0; j < ills.Len(); j++ { + for j := range ills.Len() { logs := ills.At(j).LogRecords() scope := ills.At(j).Scope() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { log := logs.At(k) tenant := GetTenantFromTenantHint(log.Attributes(), resource.Attributes()) group, ok := groups[tenant] diff --git a/pkg/translator/loki/logs_to_loki_test.go b/pkg/translator/loki/logs_to_loki_test.go index 342fb500a3ddc..f70c988e80cbf 100644 --- a/pkg/translator/loki/logs_to_loki_test.go +++ b/pkg/translator/loki/logs_to_loki_test.go @@ -225,12 +225,12 @@ func TestLogsToLokiRequestWithGroupingByTenant(t *testing.T) { assert.True(t, ok) streams := request.Streams - for s := 0; s < len(streams); s++ { + for s := range len(streams) { gotStream := request.Streams[s] wantStream := want.Streams[s] assert.Equal(t, wantStream.Labels, gotStream.Labels) - for e := 0; e < len(gotStream.Entries); e++ { + for e := range len(gotStream.Entries) { assert.Equal(t, wantStream.Entries[e].Line, gotStream.Entries[e].Line) } } @@ -431,7 +431,7 @@ func TestLogsToLokiRequestWithoutTenant(t *testing.T) { // prepare ld := plog.NewLogs() ld.ResourceLogs().AppendEmpty() - for i := 0; i < 3; i++ { + for i := range 3 { ld.ResourceLogs().At(0).ScopeLogs().AppendEmpty() ld.ResourceLogs().At(0).ScopeLogs().At(i).LogRecords().AppendEmpty() ld.ResourceLogs().At(0).ScopeLogs().At(i).LogRecords().At(0).SetTraceID([16]byte{byte(i + 1)}) @@ -447,11 +447,11 @@ func TestLogsToLokiRequestWithoutTenant(t *testing.T) { } rlogs := ld.ResourceLogs() - for i := 0; i < rlogs.Len(); i++ { + for i := range rlogs.Len() { slogs := rlogs.At(i).ScopeLogs() - for j := 0; j < slogs.Len(); j++ { + for j := range slogs.Len() { logs := slogs.At(j).LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { log := logs.At(k) attrs := map[string]any{} if len(tt.attrs) > 0 { @@ -487,7 +487,7 @@ func TestLogsToLokiRequestWithoutTenant(t *testing.T) { assert.Equal(t, tt.expectedLabel, request.Streams[0].Labels) entries := request.Streams[0].Entries - for i := 0; i < len(entries); i++ { + for i := range len(entries) { assert.Equal(t, tt.expectedLines[i], entries[i].Line) } }) diff --git a/pkg/translator/opencensus/metrics_to_oc.go b/pkg/translator/opencensus/metrics_to_oc.go index 7adfd34c6454d..3586ffc66ed59 100644 --- a/pkg/translator/opencensus/metrics_to_oc.go +++ b/pkg/translator/opencensus/metrics_to_oc.go @@ -35,11 +35,11 @@ func ResourceMetricsToOC(rm pmetric.ResourceMetrics) (*occommon.Node, *ocresourc // Approximate the number of the metrics as the number of the metrics in the first // instrumentation library info. ocMetrics := make([]*ocmetrics.Metric, 0, ilms.At(0).Metrics().Len()) - for i := 0; i < ilms.Len(); i++ { + for i := range ilms.Len() { ilm := ilms.At(i) // TODO: Handle instrumentation library name and version. 
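
For ordinary Go slices (the loki test hunks above, for example), the linter's output `for i := range len(s)` is behavior-preserving but not the only possible end state; ranging over the slice itself is usually the more idiomatic final form. A small hypothetical illustration:

    package main

    import "fmt"

    func main() {
        entries := []string{"line-1", "line-2", "line-3"}

        // What intrange produces from `for i := 0; i < len(entries); i++`:
        for i := range len(entries) {
            fmt.Println(i, entries[i])
        }

        // Equivalent, and arguably tidier, for plain slices:
        for i, e := range entries {
            fmt.Println(i, e)
        }
    }
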
metrics := ilm.Metrics() - for j := 0; j < metrics.Len(); j++ { + for j := range metrics.Len() { ocMetrics = append(ocMetrics, metricToOC(metrics.At(j))) } } @@ -127,7 +127,7 @@ func collectLabelKeysAndValueType(metric pmetric.Metric) *labelKeysAndType { // collectLabelKeysNumberDataPoints returns true if all values are int. func collectLabelKeysNumberDataPoints(dps pmetric.NumberDataPointSlice, keySet map[string]struct{}) bool { allInt := true - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { addLabelKeys(keySet, dps.At(i).Attributes()) if dps.At(i).ValueType() != pmetric.NumberDataPointValueTypeInt { allInt = false @@ -137,13 +137,13 @@ func collectLabelKeysNumberDataPoints(dps pmetric.NumberDataPointSlice, keySet m } func collectLabelKeysHistogramDataPoints(dhdp pmetric.HistogramDataPointSlice, keySet map[string]struct{}) { - for i := 0; i < dhdp.Len(); i++ { + for i := range dhdp.Len() { addLabelKeys(keySet, dhdp.At(i).Attributes()) } } func collectLabelKeysSummaryDataPoints(dhdp pmetric.SummaryDataPointSlice, keySet map[string]struct{}) { - for i := 0; i < dhdp.Len(); i++ { + for i := range dhdp.Len() { addLabelKeys(keySet, dhdp.At(i).Attributes()) } } @@ -211,7 +211,7 @@ func numberDataPointsToOC(dps pmetric.NumberDataPointSlice, labelKeys *labelKeys return nil } timeseries := make([]*ocmetrics.TimeSeries, 0, dps.Len()) - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) point := &ocmetrics.Point{ Timestamp: timestampAsTimestampPb(dp.Timestamp()), @@ -241,7 +241,7 @@ func doubleHistogramPointToOC(dps pmetric.HistogramDataPointSlice, labelKeys *la return nil } timeseries := make([]*ocmetrics.TimeSeries, 0, dps.Len()) - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) buckets := histogramBucketsToOC(dp.BucketCounts()) exemplarsToOC(dp.ExplicitBounds(), buckets, dp.Exemplars()) @@ -289,7 +289,7 @@ func histogramBucketsToOC(bcts pcommon.UInt64Slice) []*ocmetrics.DistributionVal } ocBuckets := make([]*ocmetrics.DistributionValue_Bucket, 0, bcts.Len()) - for i := 0; i < bcts.Len(); i++ { + for i := range bcts.Len() { ocBuckets = append(ocBuckets, &ocmetrics.DistributionValue_Bucket{ Count: int64(bcts.At(i)), }) @@ -302,7 +302,7 @@ func doubleSummaryPointToOC(dps pmetric.SummaryDataPointSlice, labelKeys *labelK return nil } timeseries := make([]*ocmetrics.TimeSeries, 0, dps.Len()) - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { dp := dps.At(i) percentileValues := summaryPercentilesToOC(dp.QuantileValues()) @@ -335,7 +335,7 @@ func summaryPercentilesToOC(qtls pmetric.SummaryDataPointValueAtQuantileSlice) [ } ocPercentiles := make([]*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile, 0, qtls.Len()) - for i := 0; i < qtls.Len(); i++ { + for i := range qtls.Len() { quantile := qtls.At(i) ocPercentiles = append(ocPercentiles, &ocmetrics.SummaryValue_Snapshot_ValueAtPercentile{ Percentile: quantile.Quantile() * 100, @@ -350,7 +350,7 @@ func exemplarsToOC(bounds pcommon.Float64Slice, ocBuckets []*ocmetrics.Distribut return } - for i := 0; i < exemplars.Len(); i++ { + for i := range exemplars.Len() { exemplar := exemplars.At(i) var val float64 switch exemplar.ValueType() { @@ -396,7 +396,7 @@ func attributeValuesToOC(labels pcommon.Map, labelKeys *labelKeysAndType) []*ocm // (The order matches key indices) labelValuesOrig := make([]ocmetrics.LabelValue, len(labelKeys.keys)) labelValues := make([]*ocmetrics.LabelValue, len(labelKeys.keys)) - for i := 0; i < len(labelKeys.keys); i++ { + for i := range 
len(labelKeys.keys) { labelValues[i] = &labelValuesOrig[i] } diff --git a/pkg/translator/opencensus/oc_to_metrics.go b/pkg/translator/opencensus/oc_to_metrics.go index 4af329f24dcdc..4516d72539527 100644 --- a/pkg/translator/opencensus/oc_to_metrics.go +++ b/pkg/translator/opencensus/oc_to_metrics.go @@ -201,7 +201,7 @@ func fillAttributesMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocme } attributesMap.EnsureCapacity(lablesCount) - for i := 0; i < lablesCount; i++ { + for i := range lablesCount { if !ocLabelValues[i].GetHasValue() { continue } diff --git a/pkg/translator/opencensus/oc_to_metrics_test.go b/pkg/translator/opencensus/oc_to_metrics_test.go index 6fb48154a5d93..6aa5ba6b141b1 100644 --- a/pkg/translator/opencensus/oc_to_metrics_test.go +++ b/pkg/translator/opencensus/oc_to_metrics_test.go @@ -164,7 +164,7 @@ func BenchmarkMetricIntOCToMetrics(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { OCToMetrics(nil, ocResource, ocMetrics) } } @@ -178,7 +178,7 @@ func BenchmarkMetricDoubleOCToMetrics(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { OCToMetrics(nil, ocResource, ocMetrics) } } @@ -192,7 +192,7 @@ func BenchmarkMetricHistogramOCToMetrics(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { OCToMetrics(nil, ocResource, ocMetrics) } } diff --git a/pkg/translator/opencensus/oc_to_resource_test.go b/pkg/translator/opencensus/oc_to_resource_test.go index a217f12f43a25..fbbd78490d6cd 100644 --- a/pkg/translator/opencensus/oc_to_resource_test.go +++ b/pkg/translator/opencensus/oc_to_resource_test.go @@ -59,7 +59,7 @@ func BenchmarkOcNodeResourceToInternal(b *testing.B) { ocResource := generateOcResource() b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { resource := pcommon.NewResource() ocNodeResourceToInternal(ocNode, ocResource, resource) if ocNode.Identifier.Pid != 123 { @@ -81,7 +81,7 @@ func BenchmarkOcResourceNodeUnmarshal(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { unmarshalOc := &agenttracepb.ExportTraceServiceRequest{} if err := proto.Unmarshal(bytes, unmarshalOc); err != nil { b.Fail() diff --git a/pkg/translator/opencensus/oc_to_traces_test.go b/pkg/translator/opencensus/oc_to_traces_test.go index 862c26e455fa5..a50f0e0587ca9 100644 --- a/pkg/translator/opencensus/oc_to_traces_test.go +++ b/pkg/translator/opencensus/oc_to_traces_test.go @@ -403,7 +403,7 @@ func BenchmarkSpansWithAttributesOCToInternal(b *testing.B) { spans := []*octrace.Span{generateSpanWithAttributes(15)} b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { OCToTraces(nil, resource, spans) } } @@ -417,7 +417,7 @@ func BenchmarkSpansWithAttributesUnmarshal(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { unmarshalOc := &octrace.Span{} if err := proto.Unmarshal(bytes, unmarshalOc); err != nil { b.Fail() @@ -442,7 +442,7 @@ func generateSpanWithAttributes(length int) *octrace.Span { ocSpan2.Attributes.AttributeMap = make(map[string]*octrace.AttributeValue, length) ocAttr := ocSpan2.Attributes.AttributeMap - for i := 0; i < length; i++ { + for i := range length { ocAttr["span-link-attr_"+strconv.Itoa(i)] = &octrace.AttributeValue{ Value: &octrace.AttributeValue_StringValue{ StringValue: &octrace.TruncatableString{Value: "span-link-attr-val"}, diff --git a/pkg/translator/opencensus/resource_to_oc_test.go b/pkg/translator/opencensus/resource_to_oc_test.go index 1b2e6388a102d..336a3fef56043 100644 --- 
a/pkg/translator/opencensus/resource_to_oc_test.go +++ b/pkg/translator/opencensus/resource_to_oc_test.go @@ -224,7 +224,7 @@ func BenchmarkInternalResourceToOC(b *testing.B) { resource := generateResourceWithOcNodeAndResource() b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { ocNode, _ := internalResourceToOC(resource) if ocNode.Identifier.Pid != 123 { b.Fail() @@ -240,7 +240,7 @@ func BenchmarkOcResourceNodeMarshal(b *testing.B) { } b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { if _, err := proto.Marshal(oc); err != nil { b.Fail() } diff --git a/pkg/translator/opencensus/traces_to_oc.go b/pkg/translator/opencensus/traces_to_oc.go index 0cea312203c20..9bf3c2aeb8b9c 100644 --- a/pkg/translator/opencensus/traces_to_oc.go +++ b/pkg/translator/opencensus/traces_to_oc.go @@ -32,11 +32,11 @@ func ResourceSpansToOC(rs ptrace.ResourceSpans) (*occommon.Node, *ocresource.Res // Approximate the number of the spans as the number of the spans in the first // instrumentation library info. ocSpans := make([]*octrace.Span, 0, ilss.At(0).Spans().Len()) - for i := 0; i < ilss.Len(); i++ { + for i := range ilss.Len() { ils := ilss.At(i) // TODO: Handle instrumentation library name and version. spans := ils.Spans() - for j := 0; j < spans.Len(); j++ { + for j := range spans.Len() { ocSpans = append(ocSpans, spanToOC(spans.At(j))) } } @@ -247,7 +247,7 @@ func eventsToOC(events ptrace.SpanEventSlice, droppedCount uint32) *octrace.Span } ocEvents := make([]*octrace.Span_TimeEvent, 0, events.Len()) - for i := 0; i < events.Len(); i++ { + for i := range events.Len() { ocEvents = append(ocEvents, eventToOC(events.At(i))) } @@ -319,7 +319,7 @@ func linksToOC(links ptrace.SpanLinkSlice, droppedCount uint32) *octrace.Span_Li } ocLinks := make([]*octrace.Span_Link, 0, links.Len()) - for i := 0; i < links.Len(); i++ { + for i := range links.Len() { link := links.At(i) ocLink := &octrace.Span_Link{ TraceId: traceIDToOC(link.TraceID()), diff --git a/pkg/translator/prometheusremotewrite/helper.go b/pkg/translator/prometheusremotewrite/helper.go index 13b95085d4593..d9e2dad7bce1b 100644 --- a/pkg/translator/prometheusremotewrite/helper.go +++ b/pkg/translator/prometheusremotewrite/helper.go @@ -216,7 +216,7 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { func (c *prometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice, resource pcommon.Resource, settings Settings, baseName string, ) { - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) @@ -301,7 +301,7 @@ type exemplarType interface { func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar { promExemplars := make([]prompb.Exemplar, 0, pt.Exemplars().Len()) - for i := 0; i < pt.Exemplars().Len(); i++ { + for i := range pt.Exemplars().Len() { exemplar := pt.Exemplars().At(i) exemplarRunes := 0 @@ -371,27 +371,27 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { switch metric.Type() { case pmetric.MetricTypeGauge: dataPoints := metric.Gauge().DataPoints() - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { ts = max(ts, dataPoints.At(x).Timestamp()) } case pmetric.MetricTypeSum: dataPoints := metric.Sum().DataPoints() - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { ts = max(ts, dataPoints.At(x).Timestamp()) } case 
pmetric.MetricTypeHistogram: dataPoints := metric.Histogram().DataPoints() - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { ts = max(ts, dataPoints.At(x).Timestamp()) } case pmetric.MetricTypeExponentialHistogram: dataPoints := metric.ExponentialHistogram().DataPoints() - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { ts = max(ts, dataPoints.At(x).Timestamp()) } case pmetric.MetricTypeSummary: dataPoints := metric.Summary().DataPoints() - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { ts = max(ts, dataPoints.At(x).Timestamp()) } } @@ -401,7 +401,7 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { func (c *prometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, settings Settings, baseName string, ) { - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) @@ -430,7 +430,7 @@ func (c *prometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDat c.addSample(count, countlabels) // process each percentile/quantile - for i := 0; i < pt.QuantileValues().Len(); i++ { + for i := range pt.QuantileValues().Len() { qt := pt.QuantileValues().At(i) quantile := &prompb.Sample{ Value: qt.Value(), diff --git a/pkg/translator/prometheusremotewrite/helper_test.go b/pkg/translator/prometheusremotewrite/helper_test.go index faff6b9eafc97..554702f285205 100644 --- a/pkg/translator/prometheusremotewrite/helper_test.go +++ b/pkg/translator/prometheusremotewrite/helper_test.go @@ -173,7 +173,7 @@ func TestPrometheusConverter_addSample(t *testing.T) { // Test_timeSeriesSignature checks that timeSeriesSignature returns consistent and unique signatures for a distinct label set. func Test_timeSeriesSignature(t *testing.T) { var oneKBLabels []prompb.Label - for i := 0; i < 100; i++ { + for range 100 { const name = "12345" const value = "12345" oneKBLabels = append(oneKBLabels, prompb.Label{Name: name, Value: value}) @@ -387,7 +387,7 @@ func BenchmarkCreateAttributes(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { createAttributes(r, m, ext, nil, true) } } diff --git a/pkg/translator/prometheusremotewrite/histograms.go b/pkg/translator/prometheusremotewrite/histograms.go index 4ac4d58a96d41..4f2f07ba8f5a9 100644 --- a/pkg/translator/prometheusremotewrite/histograms.go +++ b/pkg/translator/prometheusremotewrite/histograms.go @@ -19,7 +19,7 @@ const defaultZeroThreshold = 1e-128 func (c *prometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice, resource pcommon.Resource, settings Settings, baseName string, ) error { - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { pt := dataPoints.At(x) lbls := createAttributes( resource, @@ -142,7 +142,7 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, Length: 0, }) - for i := 0; i < numBuckets; i++ { + for i := range numBuckets { // The offset is scaled and adjusted by 1 as described above. nextBucketIdx := (int32(i)+buckets.Offset())>>scaleDown + 1 if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. 
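
Two details are worth noting for the typed-bound rewrites such as the `gap` hunks that follow (and the `uint(n)` one later in this patch): range accepts any integer type, so leftover conversions like `int32(gap)` are redundant though harmless, and a non-positive bound simply yields zero iterations, matching the old `j < gap` guard. A hedged sketch with made-up values:

    package main

    import "fmt"

    func main() {
        var gap int32 = 2
        // `for range gap` is legal for any integer type; the int32(gap)
        // conversion the rewrite leaves behind is a no-op.
        for range gap {
            fmt.Println("insert empty bucket")
        }

        // A zero or negative bound runs the body zero times, just like the
        // classic `for j := int32(0); j < gap; j++` header.
        gap = -1
        for range gap {
            fmt.Println("never printed")
        }

        // The loop variable, when kept, adopts the type of the bound: i is uint.
        var n uint = 2
        for i := range n {
            fmt.Printf("i has type %T\n", i)
        }
    }
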
@@ -166,7 +166,7 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, } else { // We have found a small gap (or no gap at all). // Insert empty buckets as needed. - for j := int32(0); j < gap; j++ { + for range int32(gap) { appendDelta(0) } } @@ -187,7 +187,7 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, } else { // We have found a small gap (or no gap at all). // Insert empty buckets as needed. - for j := int32(0); j < gap; j++ { + for range int32(gap) { appendDelta(0) } } diff --git a/pkg/translator/prometheusremotewrite/histograms_test.go b/pkg/translator/prometheusremotewrite/histograms_test.go index d2b3cba24ae24..a1b7be9ed6bde 100644 --- a/pkg/translator/prometheusremotewrite/histograms_test.go +++ b/pkg/translator/prometheusremotewrite/histograms_test.go @@ -386,7 +386,7 @@ func BenchmarkConvertBucketLayout(b *testing.B) { for _, scenario := range scenarios { buckets := pmetric.NewExponentialHistogramDataPointBuckets() buckets.SetOffset(0) - for i := 0; i < 1000; i++ { + for i := range 1000 { if i%(scenario.gap+1) == 0 { buckets.BucketCounts().Append(10) } else { @@ -394,7 +394,7 @@ func BenchmarkConvertBucketLayout(b *testing.B) { } } b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { convertBucketsLayout(buckets, 0) } }) diff --git a/pkg/translator/prometheusremotewrite/metrics_to_prw.go b/pkg/translator/prometheusremotewrite/metrics_to_prw.go index f5a487562ff0a..0b770c38ad66a 100644 --- a/pkg/translator/prometheusremotewrite/metrics_to_prw.go +++ b/pkg/translator/prometheusremotewrite/metrics_to_prw.go @@ -55,18 +55,18 @@ func newPrometheusConverter() *prometheusConverter { // fromMetrics converts pmetric.Metrics to Prometheus remote write format. 
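
One semantic nuance reviewers may want to keep in mind: the classic three-clause loop re-reads its bound on every iteration, while range-over-int evaluates the bound once before the loop starts. That only matters when the body mutates whatever the bound is derived from; the loops converted in this patch appear to read their bounds without modifying them, so the rewrites are safe. A small demonstration over a hypothetical slice:

    package main

    import "fmt"

    func main() {
        s := []int{1, 2, 3}
        trips := 0
        // Classic form: len(s) is re-read each iteration, so the append below
        // extends the loop to four trips.
        for i := 0; i < len(s); i++ {
            if i == 0 {
                s = append(s, 4)
            }
            trips++
        }
        fmt.Println(trips) // 4

        s = []int{1, 2, 3}
        trips = 0
        // Range form: len(s) is evaluated once, so the same body runs three times.
        for range len(s) {
            s = append(s, 4)
            trips++
        }
        fmt.Println(trips) // 3
    }
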
func (c *prometheusConverter) fromMetrics(md pmetric.Metrics, settings Settings) (errs error) { resourceMetricsSlice := md.ResourceMetrics() - for i := 0; i < resourceMetricsSlice.Len(); i++ { + for i := range resourceMetricsSlice.Len() { resourceMetrics := resourceMetricsSlice.At(i) resource := resourceMetrics.Resource() scopeMetricsSlice := resourceMetrics.ScopeMetrics() // keep track of the most recent timestamp in the ResourceMetrics for // use with the "target" info metric var mostRecentTimestamp pcommon.Timestamp - for j := 0; j < scopeMetricsSlice.Len(); j++ { + for j := range scopeMetricsSlice.Len() { metricSlice := scopeMetricsSlice.At(j).Metrics() // TODO: decide if instrumentation library information should be exported as labels - for k := 0; k < metricSlice.Len(); k++ { + for k := range metricSlice.Len() { metric := metricSlice.At(k) mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) diff --git a/pkg/translator/prometheusremotewrite/metrics_to_prw_test.go b/pkg/translator/prometheusremotewrite/metrics_to_prw_test.go index e79bd24f49d4a..7e76cab9908d9 100644 --- a/pkg/translator/prometheusremotewrite/metrics_to_prw_test.go +++ b/pkg/translator/prometheusremotewrite/metrics_to_prw_test.go @@ -36,7 +36,7 @@ func BenchmarkFromMetrics(b *testing.B) { b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) { payload := createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries, pcommon.Timestamp(uint64(time.Now().UnixNano()))) - for i := 0; i < b.N; i++ { + for range b.N { tsMap, err := FromMetrics(payload.Metrics(), Settings{}) require.NoError(b, err) require.NotNil(b, tsMap) @@ -73,7 +73,7 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) { payload := createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries, pcommon.Timestamp(uint64(time.Now().UnixNano()))) - for i := 0; i < b.N; i++ { + for range b.N { converter := newPrometheusConverter() require.NoError(b, converter.fromMetrics(payload.Metrics(), Settings{})) require.NotNil(b, converter.timeSeries()) diff --git a/pkg/translator/prometheusremotewrite/metrics_to_prw_v2.go b/pkg/translator/prometheusremotewrite/metrics_to_prw_v2.go index 6385a258575a7..897ffff2b4c6a 100644 --- a/pkg/translator/prometheusremotewrite/metrics_to_prw_v2.go +++ b/pkg/translator/prometheusremotewrite/metrics_to_prw_v2.go @@ -48,18 +48,18 @@ func newPrometheusConverterV2() *prometheusConverterV2 { // fromMetrics converts pmetric.Metrics to Prometheus remote write format. 
func (c *prometheusConverterV2) fromMetrics(md pmetric.Metrics, settings Settings) (errs error) { resourceMetricsSlice := md.ResourceMetrics() - for i := 0; i < resourceMetricsSlice.Len(); i++ { + for i := range resourceMetricsSlice.Len() { resourceMetrics := resourceMetricsSlice.At(i) resource := resourceMetrics.Resource() scopeMetricsSlice := resourceMetrics.ScopeMetrics() // keep track of the most recent timestamp in the ResourceMetrics for // use with the "target" info metric var mostRecentTimestamp pcommon.Timestamp - for j := 0; j < scopeMetricsSlice.Len(); j++ { + for j := range scopeMetricsSlice.Len() { metricSlice := scopeMetricsSlice.At(j).Metrics() // TODO: decide if instrumentation library information should be exported as labels - for k := 0; k < metricSlice.Len(); k++ { + for k := range metricSlice.Len() { metric := metricSlice.At(k) mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) diff --git a/pkg/translator/prometheusremotewrite/number_data_points.go b/pkg/translator/prometheusremotewrite/number_data_points.go index 51d3b9c9017a8..77dc701e795e4 100644 --- a/pkg/translator/prometheusremotewrite/number_data_points.go +++ b/pkg/translator/prometheusremotewrite/number_data_points.go @@ -16,7 +16,7 @@ import ( func (c *prometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, resource pcommon.Resource, settings Settings, name string, ) { - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { pt := dataPoints.At(x) labels := createAttributes( resource, @@ -47,7 +47,7 @@ func (c *prometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.Number func (c *prometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, ) { - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { pt := dataPoints.At(x) lbls := createAttributes( resource, diff --git a/pkg/translator/prometheusremotewrite/number_data_points_v2.go b/pkg/translator/prometheusremotewrite/number_data_points_v2.go index a264e27c7c899..27bcbff024d58 100644 --- a/pkg/translator/prometheusremotewrite/number_data_points_v2.go +++ b/pkg/translator/prometheusremotewrite/number_data_points_v2.go @@ -18,7 +18,7 @@ import ( func (c *prometheusConverterV2) addGaugeNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, resource pcommon.Resource, settings Settings, name string, ) { - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { pt := dataPoints.At(x) labels := createAttributes( @@ -51,7 +51,7 @@ func (c *prometheusConverterV2) addGaugeNumberDataPoints(dataPoints pmetric.Numb func (c *prometheusConverterV2) addSumNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, ) { - for x := 0; x < dataPoints.Len(); x++ { + for x := range dataPoints.Len() { pt := dataPoints.At(x) lbls := createAttributes( resource, @@ -102,7 +102,7 @@ func (c *prometheusConverterV2) addSumNumberDataPoints(dataPoints pmetric.Number // getPromExemplarsV2 returns a slice of writev2.Exemplar from pdata exemplars. 
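
The benchmark conversions throughout this patch (`for n := 0; n < b.N; n++` to `for range b.N`) are the simplest case: the index is never used, and b.N is fixed for the duration of a single benchmark invocation, so dropping the variable changes nothing. A minimal runnable sketch (the benchmark itself is hypothetical):

    package bench

    import "testing"

    func BenchmarkAlloc(b *testing.B) {
        b.ReportAllocs()
        // Rewritten form: iterate b.N times without binding an index.
        for range b.N {
            _ = make([]byte, 16)
        }
    }
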
func getPromExemplarsV2[T exemplarType](pt T) []writev2.Exemplar { promExemplars := make([]writev2.Exemplar, 0, pt.Exemplars().Len()) - for i := 0; i < pt.Exemplars().Len(); i++ { + for i := range pt.Exemplars().Len() { exemplar := pt.Exemplars().At(i) var promExemplar writev2.Exemplar diff --git a/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index 451731fc79fe1..bb089b9ccf4a5 100644 --- a/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -46,21 +46,21 @@ func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes bool) []*prompb resourceMetricsSlice := md.ResourceMetrics() metadataLength := 0 - for i := 0; i < resourceMetricsSlice.Len(); i++ { + for i := range resourceMetricsSlice.Len() { scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() - for j := 0; j < scopeMetricsSlice.Len(); j++ { + for j := range scopeMetricsSlice.Len() { metadataLength += scopeMetricsSlice.At(j).Metrics().Len() } } metadata := make([]*prompb.MetricMetadata, 0, metadataLength) - for i := 0; i < resourceMetricsSlice.Len(); i++ { + for i := range resourceMetricsSlice.Len() { resourceMetrics := resourceMetricsSlice.At(i) scopeMetricsSlice := resourceMetrics.ScopeMetrics() - for j := 0; j < scopeMetricsSlice.Len(); j++ { + for j := range scopeMetricsSlice.Len() { scopeMetrics := scopeMetricsSlice.At(j) - for k := 0; k < scopeMetrics.Metrics().Len(); k++ { + for k := range scopeMetrics.Metrics().Len() { metric := scopeMetrics.Metrics().At(k) entry := prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), diff --git a/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata_test.go b/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata_test.go index de90eaab73da3..49bc4da6c2eb8 100644 --- a/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata_test.go +++ b/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata_test.go @@ -221,7 +221,7 @@ func TestOtelMetricsToMetadata(t *testing.T) { t.Run(tt.name, func(t *testing.T) { metaData := OtelMetricsToMetadata(tt.metrics, false) - for i := 0; i < len(metaData); i++ { + for i := range len(metaData) { assert.Equal(t, tt.want[i].Type, metaData[i].Type) assert.Equal(t, tt.want[i].MetricFamilyName, metaData[i].MetricFamilyName) assert.Equal(t, tt.want[i].Help, metaData[i].Help) diff --git a/pkg/translator/signalfx/from_metrics.go b/pkg/translator/signalfx/from_metrics.go index 9d10a3491b8b4..98389edcdaaa7 100644 --- a/pkg/translator/signalfx/from_metrics.go +++ b/pkg/translator/signalfx/from_metrics.go @@ -40,13 +40,13 @@ func (ft *FromTranslator) FromMetrics(md pmetric.Metrics, dropHistogramBuckets b var sfxDataPoints []*sfxpb.DataPoint rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { rm := rms.At(i) extraDimensions := attributesToDimensions(rm.Resource().Attributes(), nil) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { sfxDataPoints = append(sfxDataPoints, ft.FromMetric(ilm.Metrics().At(k), extraDimensions, dropHistogramBuckets, processHistograms)...) 
} } @@ -116,7 +116,7 @@ func fromMetricTypeToMetricType(metric pmetric.Metric) *sfxpb.MetricType { func convertNumberDataPoints(in pmetric.NumberDataPointSlice, name string, mt *sfxpb.MetricType, extraDims []*sfxpb.Dimension) []*sfxpb.DataPoint { dps := newDpsBuilder(in.Len()) - for i := 0; i < in.Len(); i++ { + for i := range in.Len() { inDp := in.At(i) dp := dps.appendPoint(name, mt, fromTimestamp(inDp.Timestamp()), attributesToDimensions(inDp.Attributes(), extraDims)) @@ -135,7 +135,7 @@ func convertNumberDataPoints(in pmetric.NumberDataPointSlice, name string, mt *s func convertHistogram(in pmetric.HistogramDataPointSlice, name string, mt *sfxpb.MetricType, extraDims []*sfxpb.Dimension, dropHistogramBuckets bool) []*sfxpb.DataPoint { var numDPs int - for i := 0; i < in.Len(); i++ { + for i := range in.Len() { histDP := in.At(i) numDPs += 1 + histDP.BucketCounts().Len() if histDP.HasSum() { @@ -152,7 +152,7 @@ func convertHistogram(in pmetric.HistogramDataPointSlice, name string, mt *sfxpb } dps := newDpsBuilder(numDPs) - for i := 0; i < in.Len(); i++ { + for i := range in.Len() { histDP := in.At(i) ts := fromTimestamp(histDP.Timestamp()) dims := attributesToDimensions(histDP.Attributes(), extraDims) @@ -197,7 +197,7 @@ func convertHistogram(in pmetric.HistogramDataPointSlice, name string, mt *sfxpb bucketMetricName := name + "_bucket" var val uint64 - for j := 0; j < counts.Len(); j++ { + for j := range counts.Len() { val += counts.At(j) bound := infinityBoundSFxDimValue if j < bounds.Len() { @@ -220,12 +220,12 @@ func convertHistogram(in pmetric.HistogramDataPointSlice, name string, mt *sfxpb func convertSummaryDataPoints(in pmetric.SummaryDataPointSlice, name string, extraDims []*sfxpb.Dimension) []*sfxpb.DataPoint { var numDPs int - for i := 0; i < in.Len(); i++ { + for i := range in.Len() { numDPs += 2 + in.At(i).QuantileValues().Len() } dps := newDpsBuilder(numDPs) - for i := 0; i < in.Len(); i++ { + for i := range in.Len() { inDp := in.At(i) dims := attributesToDimensions(inDp.Attributes(), extraDims) @@ -241,7 +241,7 @@ func convertSummaryDataPoints(in pmetric.SummaryDataPointSlice, name string, ext sumDP.Value.DoubleValue = &sum qvs := inDp.QuantileValues() - for j := 0; j < qvs.Len(); j++ { + for j := range qvs.Len() { qv := qvs.At(j) cloneDim := make([]*sfxpb.Dimension, len(dims)+1) copy(cloneDim, dims) diff --git a/pkg/translator/signalfx/from_metrics_test.go b/pkg/translator/signalfx/from_metrics_test.go index 70d091ac40806..0554a3adf13ac 100644 --- a/pkg/translator/signalfx/from_metrics_test.go +++ b/pkg/translator/signalfx/from_metrics_test.go @@ -340,7 +340,7 @@ func Test_FromMetrics(t *testing.T) { dp.SetSum(123.4) dp.SetCount(111) qvs := dp.QuantileValues() - for i := 0; i < 4; i++ { + for i := range 4 { qv := qvs.AppendEmpty() qv.SetQuantile(0.25 * float64(i+1)) qv.SetValue(float64(i)) diff --git a/pkg/translator/signalfx/to_metrics_test.go b/pkg/translator/signalfx/to_metrics_test.go index f6f61cfb332df..fbb9a922286e9 100644 --- a/pkg/translator/signalfx/to_metrics_test.go +++ b/pkg/translator/signalfx/to_metrics_test.go @@ -334,7 +334,7 @@ func sfxTypePtr(t sfxpb.MetricType) *sfxpb.MetricType { func buildNDimensions(n uint) []*sfxpb.Dimension { d := make([]*sfxpb.Dimension, 0, n) - for i := uint(0); i < n; i++ { + for i := range uint(n) { idx := int(i) suffix := strconv.Itoa(idx) d = append(d, &sfxpb.Dimension{ diff --git a/pkg/translator/skywalking/skywalkingproto_to_traces.go b/pkg/translator/skywalking/skywalkingproto_to_traces.go index 
25bf858961196..a30da2f95db48 100644 --- a/pkg/translator/skywalking/skywalkingproto_to_traces.go +++ b/pkg/translator/skywalking/skywalkingproto_to_traces.go @@ -309,7 +309,7 @@ func swStringToUUID(s string, extra uint32) (dst [16]byte) { return uid } - for i := 0; i < 4; i++ { + for i := range 4 { uid[i] ^= byte(extra) extra >>= 8 } @@ -350,7 +350,7 @@ func swStringToUUID(s string, extra uint32) (dst [16]byte) { func uuidTo8Bytes(uuid [16]byte) [8]byte { // high bit XOR low bit var dst [8]byte - for i := 0; i < 8; i++ { + for i := range 8 { dst[i] = uuid[i] ^ uuid[i+8] } return dst diff --git a/pkg/translator/skywalking/skywalkingproto_to_traces_test.go b/pkg/translator/skywalking/skywalkingproto_to_traces_test.go index c3d14b1b007f1..0a01061048454 100644 --- a/pkg/translator/skywalking/skywalkingproto_to_traces_test.go +++ b/pkg/translator/skywalking/skywalkingproto_to_traces_test.go @@ -216,7 +216,7 @@ func Test_stringToTraceID_Unique(t *testing.T) { } var results [2][16]byte - for i := 0; i < len(tests); i++ { + for i := range len(tests) { tt := tests[i] t.Run(tt.name, func(_ *testing.T) { got := swTraceIDToTraceID(tt.segmentObject.traceID) @@ -290,7 +290,7 @@ func Test_segmentIdToSpanId_Unique(t *testing.T) { }, } var results [2][8]byte - for i := 0; i < 2; i++ { + for i := range 2 { tt := tests[i] t.Run(tt.name, func(_ *testing.T) { got := segmentIDToSpanID(tt.args.segmentID, tt.args.spanID) diff --git a/pkg/translator/zipkin/zipkinthriftconverter/deserialize.go b/pkg/translator/zipkin/zipkinthriftconverter/deserialize.go index c489a034cf867..2663de4be7c75 100644 --- a/pkg/translator/zipkin/zipkinthriftconverter/deserialize.go +++ b/pkg/translator/zipkin/zipkinthriftconverter/deserialize.go @@ -45,7 +45,7 @@ func DeserializeThrift(ctx context.Context, b []byte) ([]*zipkincore.Span, error // We don't depend on the size returned by ReadListBegin to preallocate the array because it // sometimes returns a nil error on bad input and provides an unreasonably large int for size var spans []*zipkincore.Span - for i := 0; i < size; i++ { + for range size { zs := &zipkincore.Span{} if err = zs.Read(ctx, transport); err != nil { return nil, err diff --git a/pkg/translator/zipkin/zipkinv1/thrift_test.go b/pkg/translator/zipkin/zipkinv1/thrift_test.go index 318b3bc176540..3558330e89ffd 100644 --- a/pkg/translator/zipkin/zipkinv1/thrift_test.go +++ b/pkg/translator/zipkin/zipkinv1/thrift_test.go @@ -29,7 +29,7 @@ func compareTraces(t *testing.T, want ptrace.Traces, got ptrace.Traces) { func mapperTraces(t *testing.T, td ptrace.Traces) map[string]map[string]ptrace.Span { ret := map[string]map[string]ptrace.Span{} - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { rs := td.ResourceSpans().At(i) service, found := rs.Resource().Attributes().Get(conventions.AttributeServiceName) require.True(t, found) @@ -39,7 +39,7 @@ func mapperTraces(t *testing.T, td ptrace.Traces) map[string]map[string]ptrace.S ret[service.Str()] = map[string]ptrace.Span{} } spans := rs.ScopeSpans().At(0).Spans() - for j := 0; j < spans.Len(); j++ { + for j := range spans.Len() { sps[spans.At(j).Name()] = spans.At(j) } } @@ -109,7 +109,7 @@ func BenchmarkV1ThriftToOCProto(b *testing.B) { err = json.Unmarshal(blob, &ztSpans) require.NoError(b, err, "Failed to unmarshal json into zipkin v1 thrift") - for n := 0; n < b.N; n++ { + for range b.N { _, err = thriftBatchToTraces(ztSpans) require.NoError(b, err) } diff --git a/pkg/translator/zipkin/zipkinv2/from_translator.go 
b/pkg/translator/zipkin/zipkinv2/from_translator.go index 6c63d438c340f..38165bb5e993a 100644 --- a/pkg/translator/zipkin/zipkinv2/from_translator.go +++ b/pkg/translator/zipkin/zipkinv2/from_translator.go @@ -42,7 +42,7 @@ func (t FromTranslator) FromTraces(td ptrace.Traces) ([]*zipkinmodel.SpanModel, zSpans := make([]*zipkinmodel.SpanModel, 0, td.SpanCount()) - for i := 0; i < resourceSpans.Len(); i++ { + for i := range resourceSpans.Len() { batch, err := resourceSpansToZipkinSpans(resourceSpans.At(i), td.SpanCount()/resourceSpans.Len()) if err != nil { return zSpans, err @@ -66,11 +66,11 @@ func resourceSpansToZipkinSpans(rs ptrace.ResourceSpans, estSpanCount int) ([]*z localServiceName, zTags := resourceToZipkinEndpointServiceNameAndAttributeMap(resource) zSpans := make([]*zipkinmodel.SpanModel, 0, estSpanCount) - for i := 0; i < ilss.Len(); i++ { + for i := range ilss.Len() { ils := ilss.At(i) extractScopeTags(ils.Scope(), zTags) spans := ils.Spans() - for j := 0; j < spans.Len(); j++ { + for j := range spans.Len() { zSpan, err := spanToZipkinSpan(spans.At(j), localServiceName, zTags) if err != nil { return zSpans, err @@ -198,7 +198,7 @@ func aggregateSpanTags(span ptrace.Span, zTags map[string]string) map[string]str func spanEventsToZipkinAnnotations(events ptrace.SpanEventSlice, zs *zipkinmodel.SpanModel) error { if events.Len() > 0 { zAnnos := make([]zipkinmodel.Annotation, events.Len()) - for i := 0; i < events.Len(); i++ { + for i := range events.Len() { event := events.At(i) if event.Attributes().Len() == 0 && event.DroppedAttributesCount() == 0 { zAnnos[i] = zipkinmodel.Annotation{ @@ -223,7 +223,7 @@ func spanEventsToZipkinAnnotations(events ptrace.SpanEventSlice, zs *zipkinmodel } func spanLinksToZipkinTags(links ptrace.SpanLinkSlice, zTags map[string]string) error { - for i := 0; i < links.Len(); i++ { + for i := range links.Len() { link := links.At(i) key := fmt.Sprintf("otlp.link.%d", i) jsonStr, err := json.Marshal(link.Attributes().AsRaw()) @@ -360,7 +360,7 @@ func zipkinEndpointFromTags( } func isIPv6Address(ipStr string) bool { - for i := 0; i < len(ipStr); i++ { + for i := range len(ipStr) { if ipStr[i] == ':' { return true } diff --git a/pkg/translator/zipkin/zipkinv2/from_translator_test.go b/pkg/translator/zipkin/zipkinv2/from_translator_test.go index 2cdfff40f6bcb..beb6b5a1a27d8 100644 --- a/pkg/translator/zipkin/zipkinv2/from_translator_test.go +++ b/pkg/translator/zipkin/zipkinv2/from_translator_test.go @@ -102,11 +102,11 @@ func TestInternalTracesToZipkinSpansAndBack(t *testing.T) { assert.Equal(t, td.SpanCount(), tdFromZS.SpanCount()) // check that all timestamps converted back and forth without change - for i := 0; i < td.ResourceSpans().Len(); i++ { + for i := range td.ResourceSpans().Len() { instSpans := td.ResourceSpans().At(i).ScopeSpans() - for j := 0; j < instSpans.Len(); j++ { + for j := range instSpans.Len() { spans := instSpans.At(j).Spans() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { span := spans.At(k) // search for the span with the same id to compare to @@ -121,11 +121,11 @@ func TestInternalTracesToZipkinSpansAndBack(t *testing.T) { } func findSpanByID(rs ptrace.ResourceSpansSlice, spanID pcommon.SpanID) ptrace.Span { - for i := 0; i < rs.Len(); i++ { + for i := range rs.Len() { instSpans := rs.At(i).ScopeSpans() - for j := 0; j < instSpans.Len(); j++ { + for j := range instSpans.Len() { spans := instSpans.At(j).Spans() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { span := spans.At(k) if 
span.SpanID() == spanID { return span diff --git a/pkg/translator/zipkin/zipkinv2/to_translator.go b/pkg/translator/zipkin/zipkinv2/to_translator.go index 53abad9653d02..d87eada7bc34a 100644 --- a/pkg/translator/zipkin/zipkinv2/to_translator.go +++ b/pkg/translator/zipkin/zipkinv2/to_translator.go @@ -184,7 +184,7 @@ func zipkinKindToSpanKind(kind zipkinmodel.Kind, tags map[string]string) ptrace. } func zTagsToSpanLinks(tags map[string]string, dest ptrace.SpanLinkSlice) error { - for i := 0; i < 128; i++ { + for i := range 128 { key := fmt.Sprintf("otlp.link.%d", i) val, ok := tags[key] if !ok { diff --git a/processor/attributesprocessor/attributes_log.go b/processor/attributesprocessor/attributes_log.go index 310f64f7e1963..373a4f5c56c06 100644 --- a/processor/attributesprocessor/attributes_log.go +++ b/processor/attributesprocessor/attributes_log.go @@ -33,15 +33,15 @@ func newLogAttributesProcessor(logger *zap.Logger, attrProc *attraction.AttrProc func (a *logAttributesProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rs := rls.At(i) ilss := rs.ScopeLogs() resource := rs.Resource() - for j := 0; j < ilss.Len(); j++ { + for j := range ilss.Len() { ils := ilss.At(j) logs := ils.LogRecords() library := ils.Scope() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { lr := logs.At(k) if a.skipExpr != nil { skip, err := a.skipExpr.Eval(ctx, ottllog.NewTransformContext(lr, library, resource, ils, rs)) diff --git a/processor/attributesprocessor/attributes_log_test.go b/processor/attributesprocessor/attributes_log_test.go index cf49903330e91..c2d69fa79e950 100644 --- a/processor/attributesprocessor/attributes_log_test.go +++ b/processor/attributesprocessor/attributes_log_test.go @@ -458,7 +458,7 @@ func BenchmarkAttributes_FilterLogsByName(b *testing.B) { td := generateLogData(tt.name, tt.inputAttributes) b.Run(tt.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { assert.NoError(b, tp.ConsumeLogs(context.Background(), td)) } }) diff --git a/processor/attributesprocessor/attributes_metric.go b/processor/attributesprocessor/attributes_metric.go index 3ae4afa773198..cb83b1fe604e7 100644 --- a/processor/attributesprocessor/attributes_metric.go +++ b/processor/attributesprocessor/attributes_metric.go @@ -33,15 +33,15 @@ func newMetricAttributesProcessor(logger *zap.Logger, attrProc *attraction.AttrP func (a *metricAttributesProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { rs := rms.At(i) resource := rs.Resource() ilms := rs.ScopeMetrics() - for j := 0; j < ilms.Len(); j++ { + for j := range ilms.Len() { ils := ilms.At(j) scope := ils.Scope() metrics := ils.Metrics() - for k := 0; k < metrics.Len(); k++ { + for k := range metrics.Len() { m := metrics.At(k) if a.skipExpr != nil { skip, err := a.skipExpr.Eval(ctx, ottlmetric.NewTransformContext(m, metrics, scope, resource, ils, rs)) @@ -68,27 +68,27 @@ func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, switch m.Type() { case pmetric.MetricTypeGauge: dps := m.Gauge().DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } case pmetric.MetricTypeSum: dps := m.Sum().DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { 
a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } case pmetric.MetricTypeHistogram: dps := m.Histogram().DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } case pmetric.MetricTypeExponentialHistogram: dps := m.ExponentialHistogram().DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } case pmetric.MetricTypeSummary: dps := m.Summary().DataPoints() - for i := 0; i < dps.Len(); i++ { + for i := range dps.Len() { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } case pmetric.MetricTypeEmpty: diff --git a/processor/attributesprocessor/attributes_metric_test.go b/processor/attributesprocessor/attributes_metric_test.go index c8b0f49414f4d..3707039fee03b 100644 --- a/processor/attributesprocessor/attributes_metric_test.go +++ b/processor/attributesprocessor/attributes_metric_test.go @@ -450,7 +450,7 @@ func BenchmarkAttributes_FilterMetricsByName(b *testing.B) { md := generateMetricData(tc.name, tc.inputAttributes) b.Run(tc.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { assert.NoError(b, mp.ConsumeMetrics(context.Background(), md)) } }) diff --git a/processor/attributesprocessor/attributes_trace.go b/processor/attributesprocessor/attributes_trace.go index c3d1b6fd4fc17..317be8ea713a3 100644 --- a/processor/attributesprocessor/attributes_trace.go +++ b/processor/attributesprocessor/attributes_trace.go @@ -33,15 +33,15 @@ func newSpanAttributesProcessor(logger *zap.Logger, attrProc *attraction.AttrPro func (a *spanAttributesProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { rs := rss.At(i) resource := rs.Resource() ilss := rs.ScopeSpans() - for j := 0; j < ilss.Len(); j++ { + for j := range ilss.Len() { ils := ilss.At(j) spans := ils.Spans() scope := ils.Scope() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { span := spans.At(k) if a.skipExpr != nil { skip, err := a.skipExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource, ils, rs)) diff --git a/processor/attributesprocessor/attributes_trace_test.go b/processor/attributesprocessor/attributes_trace_test.go index 16020604130bb..282d52e033eab 100644 --- a/processor/attributesprocessor/attributes_trace_test.go +++ b/processor/attributesprocessor/attributes_trace_test.go @@ -506,7 +506,7 @@ func BenchmarkAttributes_FilterSpansByName(b *testing.B) { td := generateTraceData(tt.serviceName, tt.name, tt.inputAttributes) b.Run(tt.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { assert.NoError(b, tp.ConsumeTraces(context.Background(), td)) } }) diff --git a/processor/cumulativetodeltaprocessor/processor_test.go b/processor/cumulativetodeltaprocessor/processor_test.go index c36a8f06cb0b4..9250c0ace69e0 100644 --- a/processor/cumulativetodeltaprocessor/processor_test.go +++ b/processor/cumulativetodeltaprocessor/processor_test.go @@ -663,7 +663,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { require.Equal(t, expectedMetrics.Len(), actualMetrics.Len()) - for i := 0; i < expectedMetrics.Len(); i++ { + for i := range expectedMetrics.Len() { eM := expectedMetrics.At(i) aM := actualMetrics.At(i) @@ -674,7 +674,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { aDataPoints := aM.Gauge().DataPoints() require.Equal(t, eDataPoints.Len(), aDataPoints.Len()) - 
for j := 0; j < eDataPoints.Len(); j++ { + for j := range eDataPoints.Len() { require.Equal(t, eDataPoints.At(j).DoubleValue(), aDataPoints.At(j).DoubleValue()) } } @@ -686,7 +686,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { require.Equal(t, eDataPoints.Len(), aDataPoints.Len()) require.Equal(t, eM.Sum().AggregationTemporality(), aM.Sum().AggregationTemporality()) - for j := 0; j < eDataPoints.Len(); j++ { + for j := range eDataPoints.Len() { if math.IsNaN(eDataPoints.At(j).DoubleValue()) { assert.True(t, math.IsNaN(aDataPoints.At(j).DoubleValue())) } else { @@ -703,7 +703,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { require.Equal(t, eDataPoints.Len(), aDataPoints.Len()) require.Equal(t, eM.Histogram().AggregationTemporality(), aM.Histogram().AggregationTemporality()) - for j := 0; j < eDataPoints.Len(); j++ { + for j := range eDataPoints.Len() { require.Equal(t, eDataPoints.At(j).Count(), aDataPoints.At(j).Count()) require.Equal(t, eDataPoints.At(j).HasSum(), aDataPoints.At(j).HasSum()) require.Equal(t, eDataPoints.At(j).HasMin(), aDataPoints.At(j).HasMin()) @@ -793,7 +793,7 @@ func BenchmarkConsumeMetrics(b *testing.B) { assert.NoError(b, p.ConsumeMetrics(context.Background(), metrics)) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { reset() assert.NoError(b, p.ConsumeMetrics(context.Background(), metrics)) } diff --git a/processor/deltatocumulativeprocessor/internal/data/add.go b/processor/deltatocumulativeprocessor/internal/data/add.go index c1a1ee6ad8f72..e65b2accb25f4 100644 --- a/processor/deltatocumulativeprocessor/internal/data/add.go +++ b/processor/deltatocumulativeprocessor/internal/data/add.go @@ -38,7 +38,7 @@ func (dp Histogram) Add(in Histogram) Histogram { // given we have limited error handling at this stage (and already verified boundaries are correct), // doing a best-effort add of whatever we have appears reasonable. 
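
The Histogram.Add hunk beginning just below bounds its loop by the shorter of the two bucket slices; with range-over-int that stays a one-liner. A freestanding sketch of the same shape, with made-up bucket counts:

    package main

    import "fmt"

    func main() {
        dst := []uint64{1, 2, 3, 4}
        src := []uint64{10, 20, 30}

        // Best-effort pairwise add over the shared prefix, as in Histogram.Add.
        n := min(len(dst), len(src))
        for i := range n {
            dst[i] += src[i]
        }
        fmt.Println(dst) // [11 22 33 4]
    }
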
n := min(dp.BucketCounts().Len(), in.BucketCounts().Len()) - for i := 0; i < n; i++ { + for i := range n { sum := dp.BucketCounts().At(i) + in.BucketCounts().At(i) dp.BucketCounts().SetAt(i, sum) } diff --git a/processor/deltatocumulativeprocessor/internal/data/datatest/equal.go b/processor/deltatocumulativeprocessor/internal/data/datatest/equal.go index 1ad4f2d23569e..403323cf16e90 100644 --- a/processor/deltatocumulativeprocessor/internal/data/datatest/equal.go +++ b/processor/deltatocumulativeprocessor/internal/data/datatest/equal.go @@ -67,7 +67,7 @@ func equal(tb testing.TB, want, got any, name string) bool { ok := true // compare all "getters" of the struct - for i := 0; i < vw.NumMethod(); i++ { + for i := range vw.NumMethod() { mname := vw.Type().Method(i).Name fname := strings.TrimPrefix(name+"."+mname+"()", ".") @@ -90,7 +90,7 @@ func equal(tb testing.TB, want, got any, name string) bool { } // compare all exported fields of the struct - for i := 0; i < vw.NumField(); i++ { + for i := range vw.NumField() { if !vw.Type().Field(i).IsExported() { continue } diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/expo_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/expo_test.go index 60be5a1980fd9..7afb4275e9d38 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo/expo_test.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo/expo_test.go @@ -40,7 +40,7 @@ func ExampleAbsolute() { } fmt.Printf("\n index:") - for i := 0; i < bs.BucketCounts().Len(); i++ { + for i := range bs.BucketCounts().Len() { fmt.Printf(" %d", i) } fmt.Printf("\n abs:") diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/expotest/bins.go b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/bins.go index 13b4ce74c928c..2dba839fef68a 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo/expotest/bins.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo/expotest/bins.go @@ -24,7 +24,7 @@ type Bins [8]uint64 func (bins Bins) Into() expo.Buckets { start := 0 - for i := 0; i < len(bins); i++ { + for i := range bins { if bins[i] != ø { start = i break diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/merge.go b/processor/deltatocumulativeprocessor/internal/data/expo/merge.go index 82536ea1fa7e4..e80f2dd37bb26 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo/merge.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo/merge.go @@ -37,7 +37,7 @@ func Merge(arel, brel Buckets) { counts := pcommon.NewUInt64Slice() counts.Append(make([]uint64, size-counts.Len())...) - for i := 0; i < counts.Len(); i++ { + for i := range counts.Len() { counts.SetAt(i, a.Abs(lo+i)+b.Abs(lo+i)) } diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/scale.go b/processor/deltatocumulativeprocessor/internal/data/expo/scale.go index 50fdef75c9f65..1bf4ac717ef32 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo/scale.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo/scale.go @@ -87,7 +87,7 @@ func Collapse(bs Buckets) { } bs.SetOffset(bs.Offset() / 2) - for i := 0; i < size; i++ { + for i := range size { // size is ~half of len. we add two buckets per iteration. // k jumps in steps of 2, shifted if offset makes this necessary. 
k := i*2 + shift diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/scale_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/scale_test.go index 22dbd1fbfc0b4..402c1feb123d9 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo/scale_test.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo/scale_test.go @@ -84,7 +84,7 @@ func TestDownscale(t *testing.T) { } is := datatest.New(t) - for i := 0; i < len(buckets)-1; i++ { + for i := range len(buckets) - 1 { expo.Downscale(buckets[i].bkt, buckets[i].scale, buckets[i+1].scale) is.Equalf(buckets[i+1].bkt.Offset(), buckets[i].bkt.Offset(), "offset") diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/zero_test.go b/processor/deltatocumulativeprocessor/internal/data/expo/zero_test.go index 6c095bc098d00..720b86596f99c 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo/zero_test.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo/zero_test.go @@ -88,7 +88,7 @@ func TestSlice(t *testing.T) { for _, cs := range cases { from, to := 0, len(cs.want) - for i := 0; i < len(cs.want); i++ { + for i := range len(cs.want) { if cs.want[i] != ø { from += i break diff --git a/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go b/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go index 6cc97af041325..0d5d468b144b9 100644 --- a/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go +++ b/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go @@ -12,7 +12,7 @@ func Equal[E comparable, S Slice[E]](a, b S) bool { if a.Len() != b.Len() { return false } - for i := 0; i < a.Len(); i++ { + for i := range a.Len() { if a.At(i) != b.At(i) { return false } @@ -22,7 +22,7 @@ func Equal[E comparable, S Slice[E]](a, b S) bool { func All[E any, S Slice[E]](slice S) func(func(E) bool) { return func(yield func(E) bool) { - for i := 0; i < slice.Len(); i++ { + for i := range slice.Len() { if !yield(slice.At(i)) { break } diff --git a/processor/deltatorateprocessor/processor.go b/processor/deltatorateprocessor/processor.go index a9ba3aca46973..576e92879f7e7 100644 --- a/processor/deltatorateprocessor/processor.go +++ b/processor/deltatorateprocessor/processor.go @@ -40,13 +40,13 @@ func (dtrp *deltaToRateProcessor) Start(context.Context, component.Host) error { func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { resourceMetricsSlice := md.ResourceMetrics() - for i := 0; i < resourceMetricsSlice.Len(); i++ { + for i := range resourceMetricsSlice.Len() { rm := resourceMetricsSlice.At(i) ilms := rm.ScopeMetrics() - for i := 0; i < ilms.Len(); i++ { + for i := range ilms.Len() { ilm := ilms.At(i) metricSlice := ilm.Metrics() - for j := 0; j < metricSlice.Len(); j++ { + for j := range metricSlice.Len() { metric := metricSlice.At(j) if _, ok := dtrp.ConfiguredMetrics[metric.Name()]; !ok { continue @@ -57,7 +57,7 @@ func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pmetric.M } dataPointSlice := metric.Sum().DataPoints() - for i := 0; i < dataPointSlice.Len(); i++ { + for i := range dataPointSlice.Len() { dataPoint := dataPointSlice.At(i) durationNanos := time.Duration(dataPoint.Timestamp() - dataPoint.StartTimestamp()) diff --git a/processor/deltatorateprocessor/processor_test.go b/processor/deltatorateprocessor/processor_test.go index d05bdc78774d1..8f6e8620ad6e0 100644 --- a/processor/deltatorateprocessor/processor_test.go +++ 
diff --git a/processor/deltatorateprocessor/processor.go b/processor/deltatorateprocessor/processor.go
index a9ba3aca46973..576e92879f7e7 100644
--- a/processor/deltatorateprocessor/processor.go
+++ b/processor/deltatorateprocessor/processor.go
@@ -40,13 +40,13 @@ func (dtrp *deltaToRateProcessor) Start(context.Context, component.Host) error {
 
 func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
 	resourceMetricsSlice := md.ResourceMetrics()
 
-	for i := 0; i < resourceMetricsSlice.Len(); i++ {
+	for i := range resourceMetricsSlice.Len() {
 		rm := resourceMetricsSlice.At(i)
 		ilms := rm.ScopeMetrics()
-		for i := 0; i < ilms.Len(); i++ {
+		for i := range ilms.Len() {
 			ilm := ilms.At(i)
 			metricSlice := ilm.Metrics()
-			for j := 0; j < metricSlice.Len(); j++ {
+			for j := range metricSlice.Len() {
 				metric := metricSlice.At(j)
 				if _, ok := dtrp.ConfiguredMetrics[metric.Name()]; !ok {
 					continue
@@ -57,7 +57,7 @@ func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pmetric.M
 				}
 				dataPointSlice := metric.Sum().DataPoints()
-				for i := 0; i < dataPointSlice.Len(); i++ {
+				for i := range dataPointSlice.Len() {
 					dataPoint := dataPointSlice.At(i)
 					durationNanos := time.Duration(dataPoint.Timestamp() - dataPoint.StartTimestamp())
diff --git a/processor/deltatorateprocessor/processor_test.go b/processor/deltatorateprocessor/processor_test.go
index d05bdc78774d1..8f6e8620ad6e0 100644
--- a/processor/deltatorateprocessor/processor_test.go
+++ b/processor/deltatorateprocessor/processor_test.go
@@ -143,7 +143,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) {
 
 			require.Equal(t, expectedMetrics.Len(), actualMetrics.Len())
-			for i := 0; i < expectedMetrics.Len(); i++ {
+			for i := range expectedMetrics.Len() {
 				eM := expectedMetrics.At(i)
 				aM := actualMetrics.At(i)
 
@@ -154,7 +154,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) {
 					aDataPoints := aM.Gauge().DataPoints()
 					require.Equal(t, eDataPoints.Len(), aDataPoints.Len())
 
-					for j := 0; j < eDataPoints.Len(); j++ {
+					for j := range eDataPoints.Len() {
 						require.Equal(t, eDataPoints.At(j).DoubleValue(), aDataPoints.At(j).DoubleValue())
 					}
 				}
@@ -166,7 +166,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) {
 					require.Equal(t, eDataPoints.Len(), aDataPoints.Len())
 					require.Equal(t, eM.Sum().AggregationTemporality(), aM.Sum().AggregationTemporality())
 
-					for j := 0; j < eDataPoints.Len(); j++ {
+					for j := range eDataPoints.Len() {
 						require.Equal(t, eDataPoints.At(j).DoubleValue(), aDataPoints.At(j).DoubleValue())
 					}
 				}
diff --git a/processor/filterprocessor/expr_test.go b/processor/filterprocessor/expr_test.go
index 259328827c857..f44caf19faa2e 100644
--- a/processor/filterprocessor/expr_test.go
+++ b/processor/filterprocessor/expr_test.go
@@ -70,30 +70,30 @@ func testFilter(t *testing.T, mdType pmetric.MetricType, mvType pmetric.NumberDa
 	for _, metrics := range filtered {
 		filteredMetricCount += metrics.MetricCount()
 		rmsSlice := metrics.ResourceMetrics()
-		for i := 0; i < rmsSlice.Len(); i++ {
+		for i := range rmsSlice.Len() {
 			rms := rmsSlice.At(i)
 			ilms := rms.ScopeMetrics()
-			for j := 0; j < ilms.Len(); j++ {
+			for j := range ilms.Len() {
 				ilm := ilms.At(j)
 				metricSlice := ilm.Metrics()
-				for k := 0; k < metricSlice.Len(); k++ {
+				for k := range metricSlice.Len() {
 					metric := metricSlice.At(k)
 					if metric.Name() == filteredMetric {
 						dt := metric.Type()
 						switch dt {
 						case pmetric.MetricTypeGauge:
 							pts := metric.Gauge().DataPoints()
-							for l := 0; l < pts.Len(); l++ {
+							for l := range pts.Len() {
 								assertFiltered(t, pts.At(l).Attributes())
 							}
 						case pmetric.MetricTypeSum:
 							pts := metric.Sum().DataPoints()
-							for l := 0; l < pts.Len(); l++ {
+							for l := range pts.Len() {
 								assertFiltered(t, pts.At(l).Attributes())
 							}
 						case pmetric.MetricTypeHistogram:
 							pts := metric.Histogram().DataPoints()
-							for l := 0; l < pts.Len(); l++ {
+							for l := range pts.Len() {
 								assertFiltered(t, pts.At(l).Attributes())
 							}
 						}
@@ -160,7 +160,7 @@ func exprConfig(factory processor.Factory, include []string, exclude []string) c
 
 func testDataSlice(size int, mdType pmetric.MetricType, mvType pmetric.NumberDataPointValueType) []pmetric.Metrics {
 	var out []pmetric.Metrics
-	for i := 0; i < 16; i++ {
+	for i := range 16 {
 		out = append(out, testData(fmt.Sprintf("p%d_", i), size, mdType, mvType))
 	}
 	return out
diff --git a/processor/filterprocessor/metrics_test.go b/processor/filterprocessor/metrics_test.go
index b55b8337d55ef..f51a1942ad3ab 100644
--- a/processor/filterprocessor/metrics_test.go
+++ b/processor/filterprocessor/metrics_test.go
@@ -463,7 +463,7 @@ func benchmarkFilter(b *testing.B, mp *filterconfig.MetricMatchProperties) {
 	pdms := metricSlice(128)
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		for _, pdm := range pdms {
 			_ = proc.ConsumeMetrics(ctx, pdm)
 		}
@@ -472,7 +472,7 @@ func benchmarkFilter(b *testing.B, mp *filterconfig.MetricMatchProperties) {
 
 func metricSlice(numMetrics int) []pmetric.Metrics {
 	var out []pmetric.Metrics
-	for i := 0; i < numMetrics; i++ {
+	for i := range numMetrics {
 		const size = 2
 		out = append(out, pdm(fmt.Sprintf("p%d_", i), size))
 	}
diff --git a/processor/geoipprocessor/geoip_processor_logs.go b/processor/geoipprocessor/geoip_processor_logs.go
index 166cbf4871704..2908ae6a6f10c 100644
--- a/processor/geoipprocessor/geoip_processor_logs.go
+++ b/processor/geoipprocessor/geoip_processor_logs.go
@@ -11,7 +11,7 @@ import (
 
 func (g *geoIPProcessor) processLogs(ctx context.Context, ls plog.Logs) (plog.Logs, error) {
 	rl := ls.ResourceLogs()
-	for i := 0; i < rl.Len(); i++ {
+	for i := range rl.Len() {
 		switch g.cfg.Context {
 		case resource:
 			err := g.processAttributes(ctx, rl.At(i).Resource().Attributes())
@@ -19,8 +19,8 @@ func (g *geoIPProcessor) processLogs(ctx context.Context, ls plog.Logs) (plog.Lo
 				return ls, err
 			}
 		case record:
-			for j := 0; j < rl.At(i).ScopeLogs().Len(); j++ {
-				for k := 0; k < rl.At(i).ScopeLogs().At(j).LogRecords().Len(); k++ {
+			for j := range rl.At(i).ScopeLogs().Len() {
+				for k := range rl.At(i).ScopeLogs().At(j).LogRecords().Len() {
 					err := g.processAttributes(ctx, rl.At(i).ScopeLogs().At(j).LogRecords().At(k).Attributes())
 					if err != nil {
 						return ls, err
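In the benchmark rewrites above the index is never read, so the linter drops the loop variable entirely: `for range b.N`. A minimal standalone sketch of the pattern (benchmarkTarget is a hypothetical function, not one from this patch):

package bench

import "testing"

func benchmarkTarget() {}

func BenchmarkTarget(b *testing.B) {
	b.ReportAllocs()
	b.ResetTimer()
	// Runs the body exactly b.N times; no index variable is needed.
	for range b.N {
		benchmarkTarget()
	}
}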
diff --git a/processor/geoipprocessor/geoip_processor_metrics.go b/processor/geoipprocessor/geoip_processor_metrics.go
index ada8710de234f..5114840930e0f 100644
--- a/processor/geoipprocessor/geoip_processor_metrics.go
+++ b/processor/geoipprocessor/geoip_processor_metrics.go
@@ -11,7 +11,7 @@ import (
 
 func (g *geoIPProcessor) processMetrics(ctx context.Context, ms pmetric.Metrics) (pmetric.Metrics, error) {
 	rm := ms.ResourceMetrics()
-	for i := 0; i < rm.Len(); i++ {
+	for i := range rm.Len() {
 		switch g.cfg.Context {
 		case resource:
 			err := g.processAttributes(ctx, rm.At(i).Resource().Attributes())
@@ -19,8 +19,8 @@ func (g *geoIPProcessor) processMetrics(ctx context.Context, ms pmetric.Metrics)
 				return ms, err
 			}
 		case record:
-			for j := 0; j < rm.At(i).ScopeMetrics().Len(); j++ {
-				for k := 0; k < rm.At(i).ScopeMetrics().At(j).Metrics().Len(); k++ {
+			for j := range rm.At(i).ScopeMetrics().Len() {
+				for k := range rm.At(i).ScopeMetrics().At(j).Metrics().Len() {
 					err := g.processMetricAttributes(ctx, rm.At(i).ScopeMetrics().At(j).Metrics().At(k))
 					if err != nil {
 						return ms, err
@@ -42,7 +42,7 @@ func (g *geoIPProcessor) processMetricAttributes(ctx context.Context, m pmetric.
 	switch m.Type() {
 	case pmetric.MetricTypeGauge:
 		dps := m.Gauge().DataPoints()
-		for i := 0; i < dps.Len(); i++ {
+		for i := range dps.Len() {
 			err := g.processAttributes(ctx, dps.At(i).Attributes())
 			if err != nil {
 				return err
@@ -50,7 +50,7 @@ func (g *geoIPProcessor) processMetricAttributes(ctx context.Context, m pmetric.
 		}
 	case pmetric.MetricTypeSum:
 		dps := m.Sum().DataPoints()
-		for i := 0; i < dps.Len(); i++ {
+		for i := range dps.Len() {
 			err := g.processAttributes(ctx, dps.At(i).Attributes())
 			if err != nil {
 				return err
@@ -58,7 +58,7 @@ func (g *geoIPProcessor) processMetricAttributes(ctx context.Context, m pmetric.
 		}
 	case pmetric.MetricTypeHistogram:
 		dps := m.Histogram().DataPoints()
-		for i := 0; i < dps.Len(); i++ {
+		for i := range dps.Len() {
 			err := g.processAttributes(ctx, dps.At(i).Attributes())
 			if err != nil {
 				return err
@@ -66,7 +66,7 @@ func (g *geoIPProcessor) processMetricAttributes(ctx context.Context, m pmetric.
 		}
 	case pmetric.MetricTypeExponentialHistogram:
 		dps := m.ExponentialHistogram().DataPoints()
-		for i := 0; i < dps.Len(); i++ {
+		for i := range dps.Len() {
 			err := g.processAttributes(ctx, dps.At(i).Attributes())
 			if err != nil {
 				return err
@@ -74,7 +74,7 @@ func (g *geoIPProcessor) processMetricAttributes(ctx context.Context, m pmetric.
 		}
 	case pmetric.MetricTypeSummary:
 		dps := m.Summary().DataPoints()
-		for i := 0; i < dps.Len(); i++ {
+		for i := range dps.Len() {
 			err := g.processAttributes(ctx, dps.At(i).Attributes())
 			if err != nil {
 				return err
diff --git a/processor/geoipprocessor/geoip_processor_traces.go b/processor/geoipprocessor/geoip_processor_traces.go
index 590b3805cdf18..9efbdd61a1578 100644
--- a/processor/geoipprocessor/geoip_processor_traces.go
+++ b/processor/geoipprocessor/geoip_processor_traces.go
@@ -11,7 +11,7 @@ import (
 
 func (g *geoIPProcessor) processTraces(ctx context.Context, ts ptrace.Traces) (ptrace.Traces, error) {
 	rt := ts.ResourceSpans()
-	for i := 0; i < rt.Len(); i++ {
+	for i := range rt.Len() {
 		switch g.cfg.Context {
 		case resource:
 			err := g.processAttributes(ctx, rt.At(i).Resource().Attributes())
@@ -19,8 +19,8 @@ func (g *geoIPProcessor) processTraces(ctx context.Context, ts ptrace.Traces) (p
 				return ts, err
 			}
 		case record:
-			for j := 0; j < rt.At(i).ScopeSpans().Len(); j++ {
-				for k := 0; k < rt.At(i).ScopeSpans().At(j).Spans().Len(); k++ {
+			for j := range rt.At(i).ScopeSpans().Len() {
+				for k := range rt.At(i).ScopeSpans().At(j).Spans().Len() {
 					err := g.processAttributes(ctx, rt.At(i).ScopeSpans().At(j).Spans().At(k).Attributes())
 					if err != nil {
 						return ts, err
diff --git a/processor/groupbyattrsprocessor/attribute_groups.go b/processor/groupbyattrsprocessor/attribute_groups.go
index fef49f1120334..29d357b6c5044 100644
--- a/processor/groupbyattrsprocessor/attribute_groups.go
+++ b/processor/groupbyattrsprocessor/attribute_groups.go
@@ -27,7 +27,7 @@ func (tg *tracesGroup) findOrCreateResourceSpans(originResource pcommon.Resource
 	referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
 
 	rss := tg.traces.ResourceSpans()
-	for i := 0; i < rss.Len(); i++ {
+	for i := range rss.Len() {
 		if tg.resourceHashes[i] == referenceResourceHash {
 			return rss.At(i)
 		}
@@ -54,7 +54,7 @@ func (mg *metricsGroup) findOrCreateResourceMetrics(originResource pcommon.Resou
 	referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
 
 	rms := mg.metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		if mg.resourceHashes[i] == referenceResourceHash {
 			return rms.At(i)
 		}
@@ -82,7 +82,7 @@ func (lg *logsGroup) findOrCreateResourceLogs(originResource pcommon.Resource, r
 	referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
 
 	rls := lg.logs.ResourceLogs()
-	for i := 0; i < rls.Len(); i++ {
+	for i := range rls.Len() {
 		if lg.resourceHashes[i] == referenceResourceHash {
 			return rls.At(i)
 		}
@@ -102,7 +102,7 @@ func instrumentationLibrariesEqual(il1, il2 pcommon.InstrumentationScope) bool {
 // given InstrumentationScope. If nothing is found, it creates a new one
 func matchingScopeSpans(rl ptrace.ResourceSpans, library pcommon.InstrumentationScope) ptrace.ScopeSpans {
 	ilss := rl.ScopeSpans()
-	for i := 0; i < ilss.Len(); i++ {
+	for i := range ilss.Len() {
 		ils := ilss.At(i)
 		if instrumentationLibrariesEqual(ils.Scope(), library) {
 			return ils
@@ -118,7 +118,7 @@ func matchingScopeSpans(rl ptrace.ResourceSpans, library pcommon.Instrumentation
 // given InstrumentationScope. If nothing is found, it creates a new one
 func matchingScopeLogs(rl plog.ResourceLogs, library pcommon.InstrumentationScope) plog.ScopeLogs {
 	ills := rl.ScopeLogs()
-	for i := 0; i < ills.Len(); i++ {
+	for i := range ills.Len() {
 		sl := ills.At(i)
 		if instrumentationLibrariesEqual(sl.Scope(), library) {
 			return sl
@@ -134,7 +134,7 @@ func matchingScopeLogs(rl plog.ResourceLogs, library pcommon.InstrumentationScop
 // given InstrumentationScope. If nothing is found, it creates a new one
 func matchingScopeMetrics(rm pmetric.ResourceMetrics, library pcommon.InstrumentationScope) pmetric.ScopeMetrics {
 	ilms := rm.ScopeMetrics()
-	for i := 0; i < ilms.Len(); i++ {
+	for i := range ilms.Len() {
 		ilm := ilms.At(i)
 		if instrumentationLibrariesEqual(ilm.Scope(), library) {
 			return ilm
diff --git a/processor/groupbyattrsprocessor/attribute_groups_test.go b/processor/groupbyattrsprocessor/attribute_groups_test.go
index a7c40f9754452..63bd3ea2a5184 100644
--- a/processor/groupbyattrsprocessor/attribute_groups_test.go
+++ b/processor/groupbyattrsprocessor/attribute_groups_test.go
@@ -19,7 +19,7 @@ func simpleResource() pcommon.Resource {
 	rs := pcommon.NewResource()
 	rs.Attributes().PutStr("somekey1", "some-string-value")
 	rs.Attributes().PutInt("somekey2", 123)
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		k := fmt.Sprint("random-", i)
 		v := fmt.Sprint("value-", rand.IntN(100))
 		rs.Attributes().PutStr(k, v)
@@ -29,7 +29,7 @@ func simpleResource() pcommon.Resource {
 
 func randomAttributeMap() pcommon.Map {
 	attrs := pcommon.NewMap()
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		k := fmt.Sprint("key-", i)
 		v := fmt.Sprint("value-", rand.IntN(500000))
 		attrs.PutStr(k, v)
@@ -39,7 +39,7 @@ func randomAttributeMap() pcommon.Map {
 
 func randomGroups(count int) []pcommon.Map {
 	entries := make([]pcommon.Map, count)
-	for i := 0; i < count; i++ {
+	for i := range count {
 		entries[i] = randomAttributeMap()
 	}
 	return entries
@@ -149,7 +149,7 @@ func TestInstrumentationLibraryMatching(t *testing.T) {
 func BenchmarkAttrGrouping(b *testing.B) {
 	lg := newLogsGroup()
 	b.ReportAllocs()
-	for n := 0; n < b.N; n++ {
+	for range b.N {
 		lg.findOrCreateResourceLogs(res, groups[rand.IntN(count)])
 	}
 }
diff --git a/processor/groupbyattrsprocessor/processor.go b/processor/groupbyattrsprocessor/processor.go
index ac1b142242671..259ebb063dd44 100644
--- a/processor/groupbyattrsprocessor/processor.go
+++ b/processor/groupbyattrsprocessor/processor.go
@@ -26,13 +26,13 @@ func (gap *groupByAttrsProcessor) processTraces(ctx context.Context, td ptrace.T
 	rss := td.ResourceSpans()
 	tg := newTracesGroup()
 
-	for i := 0; i < rss.Len(); i++ {
+	for i := range rss.Len() {
 		rs := rss.At(i)
 		ilss := rs.ScopeSpans()
 
-		for j := 0; j < ilss.Len(); j++ {
+		for j := range ilss.Len() {
 			ils := ilss.At(j)
 
-			for k := 0; k < ils.Spans().Len(); k++ {
+			for k := range ils.Spans().Len() {
 				span := ils.Spans().At(k)
 				toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(span.Attributes())
@@ -64,13 +64,13 @@ func (gap *groupByAttrsProcessor) processLogs(ctx context.Context, ld plog.Logs)
 	rl := ld.ResourceLogs()
 	lg := newLogsGroup()
 
-	for i := 0; i < rl.Len(); i++ {
+	for i := range rl.Len() {
 		ls := rl.At(i)
 		ills := ls.ScopeLogs()
 
-		for j := 0; j < ills.Len(); j++ {
+		for j := range ills.Len() {
 			sl := ills.At(j)
 
-			for k := 0; k < sl.LogRecords().Len(); k++ {
+			for k := range sl.LogRecords().Len() {
 				log := sl.LogRecords().At(k)
 				toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(log.Attributes())
@@ -102,47 +102,47 @@ func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pmetric
 	rms := md.ResourceMetrics()
 	mg := newMetricsGroup()
 
-	for i := 0; i < rms.Len(); i++ {
+	for i := range rms.Len() {
 		rm := rms.At(i)
 		ilms := rm.ScopeMetrics()
 
-		for j := 0; j < ilms.Len(); j++ {
+		for j := range ilms.Len() {
 			ilm := ilms.At(j)
 
-			for k := 0; k < ilm.Metrics().Len(); k++ {
+			for k := range ilm.Metrics().Len() {
 				metric := ilm.Metrics().At(k)
 
 				//exhaustive:enforce
 				switch metric.Type() {
 				case pmetric.MetricTypeGauge:
-					for pointIndex := 0; pointIndex < metric.Gauge().DataPoints().Len(); pointIndex++ {
+					for pointIndex := range metric.Gauge().DataPoints().Len() {
 						dataPoint := metric.Gauge().DataPoints().At(pointIndex)
 						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
 						dataPoint.CopyTo(groupedMetric.Gauge().DataPoints().AppendEmpty())
 					}
 
 				case pmetric.MetricTypeSum:
-					for pointIndex := 0; pointIndex < metric.Sum().DataPoints().Len(); pointIndex++ {
+					for pointIndex := range metric.Sum().DataPoints().Len() {
 						dataPoint := metric.Sum().DataPoints().At(pointIndex)
 						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
 						dataPoint.CopyTo(groupedMetric.Sum().DataPoints().AppendEmpty())
 					}
 
 				case pmetric.MetricTypeSummary:
-					for pointIndex := 0; pointIndex < metric.Summary().DataPoints().Len(); pointIndex++ {
+					for pointIndex := range metric.Summary().DataPoints().Len() {
 						dataPoint := metric.Summary().DataPoints().At(pointIndex)
 						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
 						dataPoint.CopyTo(groupedMetric.Summary().DataPoints().AppendEmpty())
 					}
 
 				case pmetric.MetricTypeHistogram:
-					for pointIndex := 0; pointIndex < metric.Histogram().DataPoints().Len(); pointIndex++ {
+					for pointIndex := range metric.Histogram().DataPoints().Len() {
 						dataPoint := metric.Histogram().DataPoints().At(pointIndex)
 						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
 						dataPoint.CopyTo(groupedMetric.Histogram().DataPoints().AppendEmpty())
 					}
 
 				case pmetric.MetricTypeExponentialHistogram:
-					for pointIndex := 0; pointIndex < metric.ExponentialHistogram().DataPoints().Len(); pointIndex++ {
+					for pointIndex := range metric.ExponentialHistogram().DataPoints().Len() {
 						dataPoint := metric.ExponentialHistogram().DataPoints().At(pointIndex)
 						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
 						dataPoint.CopyTo(groupedMetric.ExponentialHistogram().DataPoints().AppendEmpty())
@@ -190,7 +190,7 @@ func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map)
 func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric pmetric.Metric) pmetric.Metric {
 	// Loop through all metrics and try to find the one that matches with the one we search for
 	// (name and type)
-	for i := 0; i < ilm.Metrics().Len(); i++ {
+	for i := range ilm.Metrics().Len() {
 		metric := ilm.Metrics().At(i)
 		if metric.Name() == searchedMetric.Name() && metric.Type() == searchedMetric.Type() {
 			return metric
diff --git a/processor/groupbyattrsprocessor/processor_test.go b/processor/groupbyattrsprocessor/processor_test.go
index fe4c520ffd10c..08241e54878ce 100644
--- a/processor/groupbyattrsprocessor/processor_test.go
+++ b/processor/groupbyattrsprocessor/processor_test.go
@@ -63,13 +63,13 @@ func filterAttributeMap(attrMap pcommon.Map, selectedKeys []string) pcommon.Map
 
 func someComplexLogs(withResourceAttrIndex bool, rlCount int, illCount int) plog.Logs {
 	logs := plog.NewLogs()
-	for i := 0; i < rlCount; i++ {
+	for i := range rlCount {
 		rl := logs.ResourceLogs().AppendEmpty()
 		if withResourceAttrIndex {
 			rl.Resource().Attributes().PutInt("resourceAttrIndex", int64(i))
 		}
 
-		for j := 0; j < illCount; j++ {
+		for range illCount {
 			log := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
 			log.Attributes().PutStr("commonGroupedAttr", "abc")
 			log.Attributes().PutStr("commonNonGroupedAttr", "xyz")
@@ -82,13 +82,13 @@ func someComplexLogs(withResourceAttrIndex bool, rlCount int, illCount int) plog
 
 func someComplexTraces(withResourceAttrIndex bool, rsCount int, ilsCount int) ptrace.Traces {
 	traces := ptrace.NewTraces()
-	for i := 0; i < rsCount; i++ {
+	for i := range rsCount {
 		rs := traces.ResourceSpans().AppendEmpty()
 		if withResourceAttrIndex {
 			rs.Resource().Attributes().PutInt("resourceAttrIndex", int64(i))
 		}
 
-		for j := 0; j < ilsCount; j++ {
+		for j := range ilsCount {
 			span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty()
 			span.SetName(fmt.Sprintf("foo-%d-%d", i, j))
 			span.Attributes().PutStr("commonGroupedAttr", "abc")
@@ -102,18 +102,18 @@ func someComplexTraces(withResourceAttrIndex bool, rsCount int, ilsCount int) pt
 
 func someComplexMetrics(withResourceAttrIndex bool, rmCount int, ilmCount int, dataPointCount int) pmetric.Metrics {
 	metrics := pmetric.NewMetrics()
-	for i := 0; i < rmCount; i++ {
+	for i := range rmCount {
 		rm := metrics.ResourceMetrics().AppendEmpty()
 		if withResourceAttrIndex {
 			rm.Resource().Attributes().PutInt("resourceAttrIndex", int64(i))
 		}
 
-		for j := 0; j < ilmCount; j++ {
+		for j := range ilmCount {
 			metric := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
 			metric.SetName(fmt.Sprintf("foo-%d-%d", i, j))
 			dps := metric.SetEmptyGauge().DataPoints()
 
-			for k := 0; k < dataPointCount; k++ {
+			for k := range dataPointCount {
 				dataPoint := dps.AppendEmpty()
 				dataPoint.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
 				dataPoint.SetIntValue(int64(k))
@@ -129,18 +129,18 @@ func someComplexMetrics(withResourceAttrIndex bool, rmCount int, ilmCount int, d
 
 func someComplexHistogramMetrics(withResourceAttrIndex bool, rmCount int, ilmCount int, dataPointCount int, histogramSize int) pmetric.Metrics {
 	metrics := pmetric.NewMetrics()
-	for i := 0; i < rmCount; i++ {
+	for i := range rmCount {
 		rm := metrics.ResourceMetrics().AppendEmpty()
 		if withResourceAttrIndex {
 			rm.Resource().Attributes().PutInt("resourceAttrIndex", int64(i))
 		}
 
-		for j := 0; j < ilmCount; j++ {
+		for j := range ilmCount {
 			metric := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
 			metric.SetName(fmt.Sprintf("foo-%d-%d", i, j))
 			metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
 
-			for k := 0; k < dataPointCount; k++ {
+			for range dataPointCount {
 				dataPoint := metric.Histogram().DataPoints().AppendEmpty()
 				dataPoint.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
 				buckets := randUIntArr(histogramSize)
@@ -159,7 +159,7 @@ func someComplexHistogramMetrics(withResourceAttrIndex bool, rmCount int, ilmCou
 
 func randUIntArr(size int) []uint64 {
 	arr := make([]uint64, size)
-	for i := 0; i < size; i++ {
+	for i := range size {
 		arr[i] = rand.Uint64()
 	}
 	return arr
@@ -175,7 +175,7 @@ func sum(arr []uint64) uint64 {
 
 func randFloat64Arr(size int) []float64 {
 	arr := make([]float64, size)
-	for i := 0; i < size; i++ {
+	for i := range size {
 		arr[i] = rand.Float64()
 	}
 	return arr
@@ -306,15 +306,15 @@ func TestComplexAttributeGrouping(t *testing.T) {
 			rls := processedLogs.ResourceLogs()
 			assert.Equal(t, tt.outputResourceCount, rls.Len())
 			assert.Equal(t, tt.outputTotalRecordsCount, processedLogs.LogRecordCount())
-			for i := 0; i < rls.Len(); i++ {
+			for i := range rls.Len() {
 				rl := rls.At(i)
 				assert.Equal(t, tt.outputInstrumentationLibraryCount, rl.ScopeLogs().Len())
 				assertResourceContainsAttributes(t, rl.Resource(), outputResourceAttrs)
 
-				for j := 0; j < rl.ScopeLogs().Len(); j++ {
+				for j := range rl.ScopeLogs().Len() {
 					logs := rl.ScopeLogs().At(j).LogRecords()
-					for k := 0; k < logs.Len(); k++ {
+					for k := range logs.Len() {
 						assert.EqualValues(t, outputRecordAttrs, logs.At(k).Attributes())
 					}
 				}
@@ -323,15 +323,15 @@ func TestComplexAttributeGrouping(t *testing.T) {
 			rss := processedSpans.ResourceSpans()
 			assert.Equal(t, tt.outputResourceCount, rss.Len())
 			assert.Equal(t, tt.outputTotalRecordsCount, processedSpans.SpanCount())
-			for i := 0; i < rss.Len(); i++ {
+			for i := range rss.Len() {
 				rs := rss.At(i)
 				assert.Equal(t, tt.outputInstrumentationLibraryCount, rs.ScopeSpans().Len())
 				assertResourceContainsAttributes(t, rs.Resource(), outputResourceAttrs)
 
-				for j := 0; j < rs.ScopeSpans().Len(); j++ {
+				for j := range rs.ScopeSpans().Len() {
 					spans := rs.ScopeSpans().At(j).Spans()
-					for k := 0; k < spans.Len(); k++ {
+					for k := range spans.Len() {
 						assert.EqualValues(t, outputRecordAttrs, spans.At(k).Attributes())
 					}
 				}
@@ -340,17 +340,17 @@ func TestComplexAttributeGrouping(t *testing.T) {
 			rms := processedMetrics.ResourceMetrics()
 			assert.Equal(t, tt.outputResourceCount, rms.Len())
 			assert.Equal(t, tt.outputTotalRecordsCount, processedMetrics.MetricCount())
-			for i := 0; i < rms.Len(); i++ {
+			for i := range rms.Len() {
 				rm := rms.At(i)
 				assert.Equal(t, tt.outputInstrumentationLibraryCount, rm.ScopeMetrics().Len())
 				assertResourceContainsAttributes(t, rm.Resource(), outputResourceAttrs)
 
-				for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+				for j := range rm.ScopeMetrics().Len() {
 					metrics := rm.ScopeMetrics().At(j).Metrics()
-					for k := 0; k < metrics.Len(); k++ {
+					for k := range metrics.Len() {
 						metric := metrics.At(k)
-						for l := 0; l < metric.Gauge().DataPoints().Len(); l++ {
+						for l := range metric.Gauge().DataPoints().Len() {
 							assert.EqualValues(t, outputRecordAttrs, metric.Gauge().DataPoints().At(l).Attributes())
 						}
 					}
@@ -360,18 +360,18 @@ func TestComplexAttributeGrouping(t *testing.T) {
 			rmhs := processedHistogramMetrics.ResourceMetrics()
 			assert.Equal(t, tt.outputResourceCount, rmhs.Len())
 			assert.Equal(t, tt.outputTotalRecordsCount, processedHistogramMetrics.MetricCount())
-			for i := 0; i < rmhs.Len(); i++ {
+			for i := range rmhs.Len() {
 				rm := rmhs.At(i)
 				assert.Equal(t, tt.outputInstrumentationLibraryCount, rm.ScopeMetrics().Len())
 				assertResourceContainsAttributes(t, rm.Resource(), outputResourceAttrs)
 
-				for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+				for j := range rm.ScopeMetrics().Len() {
 					metrics := rm.ScopeMetrics().At(j).Metrics()
-					for k := 0; k < metrics.Len(); k++ {
+					for k := range metrics.Len() {
 						metric := metrics.At(k)
 						assert.Equal(t, pmetric.AggregationTemporalityCumulative, metric.Histogram().AggregationTemporality())
-						for l := 0; l < metric.Histogram().DataPoints().Len(); l++ {
+						for l := range metric.Histogram().DataPoints().Len() {
 							assert.EqualValues(t, outputRecordAttrs, metric.Histogram().DataPoints().At(l).Attributes())
 						}
 					}
@@ -603,7 +603,7 @@ func TestAttributeGrouping(t *testing.T) {
 			assert.Equal(t, tt.count, hms.Len())
 			assert.Equal(t, tt.count, ehms.Len())
 
-			for i := 0; i < ls.Len(); i++ {
+			for i := range ls.Len() {
 				log := ls.At(i)
 				span := ss.At(i)
 				gaugeDataPoint := gms.At(i).Gauge().DataPoints().At(0)
@@ -626,10 +626,10 @@ func TestAttributeGrouping(t *testing.T) {
 
 func someSpans(attrs pcommon.Map, instrumentationLibraryCount int, spanCount int) ptrace.Traces {
 	traces := ptrace.NewTraces()
-	for i := 0; i < instrumentationLibraryCount; i++ {
+	for i := range instrumentationLibraryCount {
 		ilName := fmt.Sprint("ils-", i)
 
-		for j := 0; j < spanCount; j++ {
+		for j := range spanCount {
 			ils := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty()
 			ils.Scope().SetName(ilName)
 			span := ils.Spans().AppendEmpty()
@@ -642,10 +642,10 @@ func someSpans(attrs pcommon.Map, instrumentationLibraryCount int, spanCount int
 
 func someLogs(attrs pcommon.Map, instrumentationLibraryCount int, logCount int) plog.Logs {
 	logs := plog.NewLogs()
-	for i := 0; i < instrumentationLibraryCount; i++ {
+	for i := range instrumentationLibraryCount {
 		ilName := fmt.Sprint("ils-", i)
 
-		for j := 0; j < logCount; j++ {
+		for range logCount {
 			sl := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty()
 			sl.Scope().SetName(ilName)
 			log := sl.LogRecords().AppendEmpty()
@@ -657,10 +657,10 @@ func someLogs(attrs pcommon.Map, instrumentationLibraryCount int, logCount int)
 
 func someGaugeMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics {
 	metrics := pmetric.NewMetrics()
-	for i := 0; i < instrumentationLibraryCount; i++ {
+	for i := range instrumentationLibraryCount {
 		ilName := fmt.Sprint("ils-", i)
 
-		for j := 0; j < metricCount; j++ {
+		for j := range metricCount {
 			ilm := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
 			ilm.Scope().SetName(ilName)
 			metric := ilm.Metrics().AppendEmpty()
@@ -674,10 +674,10 @@ func someGaugeMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metric
 
 func someSumMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics {
 	metrics := pmetric.NewMetrics()
-	for i := 0; i < instrumentationLibraryCount; i++ {
+	for i := range instrumentationLibraryCount {
 		ilName := fmt.Sprint("ils-", i)
 
-		for j := 0; j < metricCount; j++ {
+		for j := range metricCount {
 			ilm := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
 			ilm.Scope().SetName(ilName)
 			metric := ilm.Metrics().AppendEmpty()
@@ -691,10 +691,10 @@ func someSumMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCo
 
 func someSummaryMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics {
 	metrics := pmetric.NewMetrics()
-	for i := 0; i < instrumentationLibraryCount; i++ {
+	for i := range instrumentationLibraryCount {
 		ilName := fmt.Sprint("ils-", i)
 
-		for j := 0; j < metricCount; j++ {
+		for j := range metricCount {
 			ilm := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
 			ilm.Scope().SetName(ilName)
 			metric := ilm.Metrics().AppendEmpty()
@@ -708,10 +708,10 @@ func someSummaryMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metr
 
 func someHistogramMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics {
 	metrics := pmetric.NewMetrics()
-	for i := 0; i < instrumentationLibraryCount; i++ {
+	for i := range instrumentationLibraryCount {
 		ilName := fmt.Sprint("ils-", i)
 
-		for j := 0; j < metricCount; j++ {
+		for j := range metricCount {
 			ilm := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
 			ilm.Scope().SetName(ilName)
 			metric := ilm.Metrics().AppendEmpty()
@@ -725,10 +725,10 @@ func someHistogramMetrics(attrs pcommon.Map, instrumentationLibraryCount int, me
 
 func someExponentialHistogramMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics {
 	metrics := pmetric.NewMetrics()
-	for i := 0; i < instrumentationLibraryCount; i++ {
+	for i := range instrumentationLibraryCount {
 		ilName := fmt.Sprint("ils-", i)
 
-		for j := 0; j < metricCount; j++ {
+		for j := range metricCount {
 			ilm := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
 			ilm.Scope().SetName(ilName)
 			metric := ilm.Metrics().AppendEmpty()
@@ -889,7 +889,7 @@ func TestMetricAdvancedGrouping(t *testing.T) {
 
 // Test helper function that retrieves the resource with the specified "host.name" attribute
 func retrieveHostResource(resources pmetric.ResourceMetricsSlice, hostname string) (pmetric.ResourceMetrics, bool) {
-	for i := 0; i < resources.Len(); i++ {
+	for i := range resources.Len() {
 		resource := resources.At(i)
 		hostnameValue, foundKey := resource.Resource().Attributes().Get("host.name")
 		if foundKey && hostnameValue.AsString() == hostname {
@@ -901,7 +901,7 @@ func retrieveHostResource(resources pmetric.ResourceMetricsSlice, hostname strin
 
 // Test helper function that retrieves the specified metric
 func retrieveMetric(metrics pmetric.MetricSlice, name string, metricType pmetric.MetricType) (pmetric.Metric, bool) {
-	for i := 0; i < metrics.Len(); i++ {
+	for i := range metrics.Len() {
 		metric := metrics.At(i)
 		if metric.Name() == name && metric.Type() == metricType {
 			return metric, true
@@ -945,7 +945,7 @@ func TestCompacting(t *testing.T) {
 	assert.Equal(t, 10, rls.ScopeLogs().Len())
 	assert.Equal(t, 10, rlm.ScopeMetrics().Len())
 
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		ils := rss.ScopeSpans().At(i)
 		sl := rls.ScopeLogs().At(i)
 		ilm := rlm.ScopeMetrics().At(i)
@@ -1035,7 +1035,7 @@ func BenchmarkCompacting(bb *testing.B) {
 	require.NoError(b, err)
 
 	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for range b.N {
 		_, err := gap.processTraces(context.Background(), spans)
 		if err != nil {
 			return
diff --git a/processor/groupbytraceprocessor/event_test.go b/processor/groupbytraceprocessor/event_test.go
index 61f55808722a0..22859a6e76005 100644
--- a/processor/groupbytraceprocessor/event_test.go
+++ b/processor/groupbytraceprocessor/event_test.go
@@ -344,11 +344,11 @@ func TestEventConsumeConsistency(t *testing.T) {
 		t.Run(tt.casename, func(t *testing.T) {
 			realTraceID := workerIndexForTraceID(pcommon.TraceID(tt.traceID), 100)
 			var wg sync.WaitGroup
-			for i := 0; i < 50; i++ {
+			for range 50 {
 				wg.Add(1)
 				go func() {
 					defer wg.Done()
-					for j := 0; j < 30; j++ {
+					for range 30 {
 						assert.Equal(t, realTraceID, workerIndexForTraceID(pcommon.TraceID(tt.traceID), 100))
 					}
 				}()
diff --git a/processor/groupbytraceprocessor/processor_test.go b/processor/groupbytraceprocessor/processor_test.go
index 162738d8b8f6c..663f564a38898 100644
--- a/processor/groupbytraceprocessor/processor_test.go
+++ b/processor/groupbytraceprocessor/processor_test.go
@@ -577,7 +577,7 @@ func BenchmarkConsumeTracesCompleteOnFirstBatch(b *testing.B) {
 		assert.NoError(b, p.Shutdown(ctx))
 	}()
 
-	for n := 0; n < b.N; n++ {
+	for n := range b.N {
 		traceID := pcommon.TraceID([16]byte{byte(1 + n), 2, 3, 4})
 		trace := simpleTracesWithID(traceID)
 		assert.NoError(b, p.ConsumeTraces(context.Background(), trace))
diff --git a/processor/groupbytraceprocessor/storage_memory.go b/processor/groupbytraceprocessor/storage_memory.go
index 9de15d877c947..9f95a58fe3095 100644
--- a/processor/groupbytraceprocessor/storage_memory.go
+++ b/processor/groupbytraceprocessor/storage_memory.go
@@ -42,7 +42,7 @@ func (st *memoryStorage) createOrAppend(traceID pcommon.TraceID, td ptrace.Trace
 	newRss := ptrace.NewResourceSpansSlice()
 	td.ResourceSpans().CopyTo(newRss)
-	for i := 0; i < newRss.Len(); i++ {
+	for i := range newRss.Len() {
 		content = append(content, newRss.At(i))
 	}
 	st.content[traceID] = content
diff --git a/processor/intervalprocessor/processor.go b/processor/intervalprocessor/processor.go
index 5a9df9f4e0b0f..94f1a52b06772 100644
--- a/processor/intervalprocessor/processor.go
+++ b/processor/intervalprocessor/processor.go
@@ -179,7 +179,7 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro
 }
 
 func aggregateDataPoints[DPS metrics.DataPointSlice[DP], DP metrics.DataPoint[DP]](dataPoints DPS, mCloneDataPoints DPS, metricID identity.Metric, dpLookup map[identity.Stream]DP) {
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		dp := dataPoints.At(i)
 
 		streamID := identity.OfStream(metricID, dp)
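One behavioral detail worth keeping in mind while reviewing these hunks: in `for i := range s.Len()` the bound is evaluated once, before the first iteration, while the old `for i := 0; i < s.Len(); i++` re-evaluates Len() on every pass. The rewrites in this patch are safe because none of the converted loop bodies grow the slice they iterate, but the difference is observable when a body does. A small illustrative sketch (plain Go, not from the patch):

package main

import "fmt"

func main() {
	xs := []int{0, 1, 2}

	// len(xs) is captured once here: the loop runs exactly 3 times,
	// even though the body appends.
	for i := range len(xs) {
		xs = append(xs, i)
	}
	fmt.Println(len(xs)) // 6

	// The three-clause form would re-check len(xs) each iteration and,
	// with this body, never terminate: the slice grows as fast as i.
}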
diff --git a/processor/k8sattributesprocessor/processor.go b/processor/k8sattributesprocessor/processor.go
index cfcc33c260f79..fff3aa330b4fb 100644
--- a/processor/k8sattributesprocessor/processor.go
+++ b/processor/k8sattributesprocessor/processor.go
@@ -100,7 +100,7 @@ func (kp *kubernetesprocessor) Shutdown(context.Context) error {
 // processTraces process traces and add k8s metadata using resource IP or incoming IP as pod origin.
 func (kp *kubernetesprocessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
 	rss := td.ResourceSpans()
-	for i := 0; i < rss.Len(); i++ {
+	for i := range rss.Len() {
 		kp.processResource(ctx, rss.At(i).Resource())
 	}
 
@@ -110,7 +110,7 @@ func (kp *kubernetesprocessor) processTraces(ctx context.Context, td ptrace.Trac
 // processMetrics process metrics and add k8s metadata using resource IP, hostname or incoming IP as pod origin.
 func (kp *kubernetesprocessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
 	rm := md.ResourceMetrics()
-	for i := 0; i < rm.Len(); i++ {
+	for i := range rm.Len() {
 		kp.processResource(ctx, rm.At(i).Resource())
 	}
 
@@ -120,7 +120,7 @@ func (kp *kubernetesprocessor) processMetrics(ctx context.Context, md pmetric.Me
 // processLogs process logs and add k8s metadata using resource IP, hostname or incoming IP as pod origin.
 func (kp *kubernetesprocessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) {
 	rl := ld.ResourceLogs()
-	for i := 0; i < rl.Len(); i++ {
+	for i := range rl.Len() {
 		kp.processResource(ctx, rl.At(i).Resource())
 	}
 
@@ -130,7 +130,7 @@ func (kp *kubernetesprocessor) processLogs(ctx context.Context, ld plog.Logs) (p
 // processProfiles process profiles and add k8s metadata using resource IP, hostname or incoming IP as pod origin.
 func (kp *kubernetesprocessor) processProfiles(ctx context.Context, pd pprofile.Profiles) (pprofile.Profiles, error) {
 	rp := pd.ResourceProfiles()
-	for i := 0; i < rp.Len(); i++ {
+	for i := range rp.Len() {
 		kp.processResource(ctx, rp.At(i).Resource())
 	}
diff --git a/processor/k8sattributesprocessor/processor_test.go b/processor/k8sattributesprocessor/processor_test.go
index 06b28caa77467..008df5720774b 100644
--- a/processor/k8sattributesprocessor/processor_test.go
+++ b/processor/k8sattributesprocessor/processor_test.go
@@ -1645,7 +1645,7 @@ func assertResourceHasStringSlice(t *testing.T, r pcommon.Resource, k string, v
 	require.Truef(t, ok, "resource does not contain attribute %s", k)
 	assert.EqualValues(t, pcommon.ValueTypeSlice, got.Type(), "attribute %s is not of type slice", k)
 	slice := got.Slice()
-	for i := 0; i < slice.Len(); i++ {
+	for i := range slice.Len() {
 		assert.EqualValues(t, pcommon.ValueTypeStr, slice.At(i).Type())
 		assert.EqualValues(t, v[i], slice.At(i).AsString(), "attribute %s[%d] is not equal to %s", k, i, v[i])
 	}
diff --git a/processor/logdedupprocessor/counter_test.go b/processor/logdedupprocessor/counter_test.go
index 2deab717cbcc3..ba6656d64769a 100644
--- a/processor/logdedupprocessor/counter_test.go
+++ b/processor/logdedupprocessor/counter_test.go
@@ -95,7 +95,7 @@ func Test_logAggregatorReset(t *testing.T) {
 	require.NoError(t, err)
 	aggregator := newLogAggregator("log_count", time.UTC, telemetryBuilder, nil)
-	for i := 0; i < 2; i++ {
+	for i := range 2 {
 		resource := pcommon.NewResource()
 		resource.Attributes().PutInt("i", int64(i))
 		key := getResourceKey(resource)
diff --git a/processor/logdedupprocessor/processor.go b/processor/logdedupprocessor/processor.go
index 306cc9c189694..c9d3f20225f7c 100644
--- a/processor/logdedupprocessor/processor.go
+++ b/processor/logdedupprocessor/processor.go
@@ -86,11 +86,11 @@ func (p *logDedupProcessor) ConsumeLogs(ctx context.Context, pl plog.Logs) error
 	p.mux.Lock()
 	defer p.mux.Unlock()
 
-	for i := 0; i < pl.ResourceLogs().Len(); i++ {
+	for i := range pl.ResourceLogs().Len() {
 		rl := pl.ResourceLogs().At(i)
 		resource := rl.Resource()
 
-		for j := 0; j < rl.ScopeLogs().Len(); j++ {
+		for j := range rl.ScopeLogs().Len() {
 			sl := rl.ScopeLogs().At(j)
 			scope := sl.Scope()
 			logs := sl.LogRecords()
diff --git a/processor/logstransformprocessor/processor_test.go b/processor/logstransformprocessor/processor_test.go
index c51bb042911d0..41695396bd544 100644
--- a/processor/logstransformprocessor/processor_test.go
+++ b/processor/logstransformprocessor/processor_test.go
@@ -262,7 +262,7 @@ func TestProcessorShutdownWithSlowOperator(t *testing.T) {
 	scopeLogs := testLog.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty()
-	for i := 0; i < 500; i++ {
+	for range 500 {
 		lr := scopeLogs.LogRecords().AppendEmpty()
 		lr.Body().SetStr("Test message")
 	}
diff --git a/processor/metricsgenerationprocessor/processor.go b/processor/metricsgenerationprocessor/processor.go
index 425bef0bf70d3..0d1d29fde8761 100644
--- a/processor/metricsgenerationprocessor/processor.go
+++ b/processor/metricsgenerationprocessor/processor.go
@@ -52,7 +52,7 @@ func (mgp *metricsGenerationProcessor) Start(context.Context, component.Host) er
 
 func (mgp *metricsGenerationProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
 	resourceMetricsSlice := md.ResourceMetrics()
 
-	for i := 0; i < resourceMetricsSlice.Len(); i++ {
+	for i := range resourceMetricsSlice.Len() {
 		rm := resourceMetricsSlice.At(i)
 		nameToMetricMap := getNameToMetricMap(rm)
diff --git a/processor/metricsgenerationprocessor/processor_test.go b/processor/metricsgenerationprocessor/processor_test.go
index 39605fe8a43bd..96ca25780a981 100644
--- a/processor/metricsgenerationprocessor/processor_test.go
+++ b/processor/metricsgenerationprocessor/processor_test.go
@@ -305,7 +305,7 @@ func TestMetricsGenerationProcessor(t *testing.T) {
 
 			require.Equal(t, expectedMetrics.Len(), actualMetrics.Len())
-			for i := 0; i < expectedMetrics.Len(); i++ {
+			for i := range expectedMetrics.Len() {
 				eM := expectedMetrics.At(i)
 				aM := actualMetrics.At(i)
 
@@ -316,7 +316,7 @@ func TestMetricsGenerationProcessor(t *testing.T) {
 					aDataPoints := aM.Gauge().DataPoints()
 					require.Equal(t, eDataPoints.Len(), aDataPoints.Len())
 
-					for j := 0; j < eDataPoints.Len(); j++ {
+					for j := range eDataPoints.Len() {
 						switch eDataPoints.At(j).ValueType() {
 						case pmetric.NumberDataPointValueTypeDouble:
 							require.Equal(t, eDataPoints.At(j).DoubleValue(), aDataPoints.At(j).DoubleValue())
diff --git a/processor/metricsgenerationprocessor/utils.go b/processor/metricsgenerationprocessor/utils.go
index 72fab9ca7f6e2..c58832a03555b 100644
--- a/processor/metricsgenerationprocessor/utils.go
+++ b/processor/metricsgenerationprocessor/utils.go
@@ -15,10 +15,10 @@ func getNameToMetricMap(rm pmetric.ResourceMetrics) map[string]pmetric.Metric {
 	ilms := rm.ScopeMetrics()
 	metricMap := make(map[string]pmetric.Metric)
 
-	for i := 0; i < ilms.Len(); i++ {
+	for i := range ilms.Len() {
 		ilm := ilms.At(i)
 		metricSlice := ilm.Metrics()
-		for j := 0; j < metricSlice.Len(); j++ {
+		for j := range metricSlice.Len() {
 			metric := metricSlice.At(j)
 			metricMap[metric.Name()] = metric
 		}
@@ -56,10 +56,10 @@ func getMetricValue(metric pmetric.Metric) float64 {
 // Note: This method assumes the matchAttributes feature flag is enabled.
 func generateCalculatedMetrics(rm pmetric.ResourceMetrics, metric2 pmetric.Metric, rule internalRule, logger *zap.Logger) {
 	ilms := rm.ScopeMetrics()
-	for i := 0; i < ilms.Len(); i++ {
+	for i := range ilms.Len() {
 		ilm := ilms.At(i)
 		metricSlice := ilm.Metrics()
-		for j := 0; j < metricSlice.Len(); j++ {
+		for j := range metricSlice.Len() {
 			metric := metricSlice.At(j)
 			if metric.Name() == rule.metric1 {
@@ -104,10 +104,10 @@ func generateMetricFromMatchingAttributes(metric1 pmetric.Metric, metric2 pmetri
 		return pmetric.NewMetric()
 	}
 
-	for i := 0; i < metric1DataPoints.Len(); i++ {
+	for i := range metric1DataPoints.Len() {
 		metric1DP := metric1DataPoints.At(i)
 
-		for j := 0; j < metric2DataPoints.Len(); j++ {
+		for j := range metric2DataPoints.Len() {
 			metric2DP := metric2DataPoints.At(j)
 			if dataPointAttributesMatch(metric1DP, metric2DP) {
 				val, err := calculateValue(dataPointValue(metric1DP), dataPointValue(metric2DP), rule.operation, rule.name)
@@ -161,10 +161,10 @@ func dataPointAttributesMatch(dp1, dp2 pmetric.NumberDataPoint) bool {
 // The value for newly calculated metrics is always a floating point number.
 func generateScalarMetrics(rm pmetric.ResourceMetrics, operand2 float64, rule internalRule, logger *zap.Logger) {
 	ilms := rm.ScopeMetrics()
-	for i := 0; i < ilms.Len(); i++ {
+	for i := range ilms.Len() {
 		ilm := ilms.At(i)
 		metricSlice := ilm.Metrics()
-		for j := 0; j < metricSlice.Len(); j++ {
+		for j := range metricSlice.Len() {
 			metric := metricSlice.At(j)
 			if metric.Name() == rule.metric1 {
 				newMetric := generateMetricFromOperand(metric, operand2, rule.operation, logger)
@@ -190,7 +190,7 @@ func generateMetricFromOperand(from pmetric.Metric, operand2 float64, operation
 		return pmetric.NewMetric()
 	}
 
-	for i := 0; i < dataPoints.Len(); i++ {
+	for i := range dataPoints.Len() {
 		fromDataPoint := dataPoints.At(i)
 		var operand1 float64
 		switch fromDataPoint.ValueType() {
diff --git a/processor/metricstransformprocessor/factory.go b/processor/metricstransformprocessor/factory.go
index 6a87e8feae102..2d27b2158c253 100644
--- a/processor/metricstransformprocessor/factory.go
+++ b/processor/metricstransformprocessor/factory.go
@@ -188,7 +188,7 @@ func createFilter(filterConfig FilterConfig) (internalFilter, error) {
 // createLabelValueMapping creates the labelValue rename mappings based on the valueActions
 func createLabelValueMapping(valueActions []ValueAction, version string) map[string]string {
 	mapping := make(map[string]string)
-	for i := 0; i < len(valueActions); i++ {
+	for i := range valueActions {
 		valueActions[i].NewValue = strings.ReplaceAll(valueActions[i].NewValue, "{{version}}", version)
 		mapping[valueActions[i].Value] = valueActions[i].NewValue
 	}
diff --git a/processor/metricstransformprocessor/metrics_testcase_builder_test.go b/processor/metricstransformprocessor/metrics_testcase_builder_test.go
index d8b7a1e5cc8cf..de8cf09b77fcb 100644
--- a/processor/metricstransformprocessor/metrics_testcase_builder_test.go
+++ b/processor/metricstransformprocessor/metrics_testcase_builder_test.go
@@ -115,7 +115,7 @@ func (b builder) addHistogramDatapointWithMinMaxAndExemplars(start, ts pcommon.T
 	dp.SetMax(maxVal)
 	dp.ExplicitBounds().FromRaw(bounds)
 	dp.BucketCounts().FromRaw(buckets)
-	for ei := 0; ei < len(exemplarValues); ei++ {
+	for ei := range exemplarValues {
 		exemplar := dp.Exemplars().AppendEmpty()
 		exemplar.SetTimestamp(ts)
 		exemplar.SetDoubleValue(exemplarValues[ei])
@@ -156,7 +156,7 @@ func (b builder) addExpHistogramDatapoint(config expHistogramConfig) builder {
 	dp.Positive().BucketCounts().FromRaw(config.positiveCount)
 	dp.Negative().SetOffset(config.negativeOffset)
 	dp.Negative().BucketCounts().FromRaw(config.negativeCount)
-	for ei := 0; ei < len(config.exemplarValues); ei++ {
+	for ei := range len(config.exemplarValues) {
 		exemplar := dp.Exemplars().AppendEmpty()
 		exemplar.SetTimestamp(1)
 		exemplar.SetDoubleValue(config.exemplarValues[ei])
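The two hunks just above show both accepted spellings: ranging over the slice itself (for ei := range exemplarValues) and ranging over its length (for ei := range len(config.exemplarValues)). For a slice or array the index-only range form yields the same indices as ranging over len(x). A tiny sketch of the equivalence (plain Go, illustrative only):

package main

import "fmt"

func main() {
	vals := []float64{1.5, 2.5, 3.5}

	// Index-only range over the slice: i = 0, 1, 2.
	for i := range vals {
		fmt.Println(i, vals[i])
	}

	// Range over the length: identical indices; len(vals) is
	// evaluated once up front.
	for i := range len(vals) {
		fmt.Println(i, vals[i])
	}
}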
diff --git a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go
index cf6ee289bb864..815fca890a26a 100644
--- a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go
+++ b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go
@@ -31,7 +31,7 @@ func extractAndRemoveMatchedMetrics(dest pmetric.MetricSlice, f internalFilter,
 // matchMetrics returns a slice of metrics matching the filter f. Original metrics slice is not affected.
 func matchMetrics(f internalFilter, metrics pmetric.MetricSlice) []pmetric.Metric {
 	mm := make([]pmetric.Metric, 0, metrics.Len())
-	for i := 0; i < metrics.Len(); i++ {
+	for i := range metrics.Len() {
 		if f.matchMetric(metrics.At(i)) {
 			mm = append(mm, metrics.At(i))
 		}
@@ -168,14 +168,14 @@ func extractMetricWithMatchingAttrs(metric pmetric.Metric, f internalFilter) pme
 	//exhaustive:enforce
 	case pmetric.MetricTypeGauge:
 		newMetric.SetEmptyGauge().DataPoints().EnsureCapacity(matchedDpsCount)
-		for i := 0; i < metric.Gauge().DataPoints().Len(); i++ {
+		for i := range metric.Gauge().DataPoints().Len() {
 			if dpsMatches[i] {
 				metric.Gauge().DataPoints().At(i).CopyTo(newMetric.Gauge().DataPoints().AppendEmpty())
 			}
 		}
 	case pmetric.MetricTypeSum:
 		newMetric.SetEmptySum().DataPoints().EnsureCapacity(matchedDpsCount)
-		for i := 0; i < metric.Sum().DataPoints().Len(); i++ {
+		for i := range metric.Sum().DataPoints().Len() {
 			if dpsMatches[i] {
 				metric.Sum().DataPoints().At(i).CopyTo(newMetric.Sum().DataPoints().AppendEmpty())
 			}
@@ -184,7 +184,7 @@ func extractMetricWithMatchingAttrs(metric pmetric.Metric, f internalFilter) pme
 		newMetric.Sum().SetIsMonotonic(metric.Sum().IsMonotonic())
 	case pmetric.MetricTypeHistogram:
 		newMetric.SetEmptyHistogram().DataPoints().EnsureCapacity(matchedDpsCount)
-		for i := 0; i < metric.Histogram().DataPoints().Len(); i++ {
+		for i := range metric.Histogram().DataPoints().Len() {
 			if dpsMatches[i] {
 				metric.Histogram().DataPoints().At(i).CopyTo(newMetric.Histogram().DataPoints().AppendEmpty())
 			}
@@ -192,7 +192,7 @@ func extractMetricWithMatchingAttrs(metric pmetric.Metric, f internalFilter) pme
 		newMetric.Histogram().SetAggregationTemporality(metric.Histogram().AggregationTemporality())
 	case pmetric.MetricTypeExponentialHistogram:
 		newMetric.SetEmptyExponentialHistogram().DataPoints().EnsureCapacity(matchedDpsCount)
-		for i := 0; i < metric.ExponentialHistogram().DataPoints().Len(); i++ {
+		for i := range metric.ExponentialHistogram().DataPoints().Len() {
 			if dpsMatches[i] {
 				metric.ExponentialHistogram().DataPoints().At(i).CopyTo(newMetric.ExponentialHistogram().DataPoints().AppendEmpty())
 			}
@@ -200,7 +200,7 @@ func extractMetricWithMatchingAttrs(metric pmetric.Metric, f internalFilter) pme
 		newMetric.ExponentialHistogram().SetAggregationTemporality(metric.ExponentialHistogram().AggregationTemporality())
 	case pmetric.MetricTypeSummary:
 		newMetric.SetEmptySummary().DataPoints().EnsureCapacity(matchedDpsCount)
-		for i := 0; i < metric.Summary().DataPoints().Len(); i++ {
+		for i := range metric.Summary().DataPoints().Len() {
 			if dpsMatches[i] {
 				metric.Summary().DataPoints().At(i).CopyTo(newMetric.Summary().DataPoints().AppendEmpty())
 			}
@@ -262,7 +262,7 @@ func (mtp *metricsTransformProcessor) processMetrics(_ context.Context, md pmetr
 			case Insert:
 				// Save len, so we don't iterate over the newly generated metrics that are appended at the end.
 				mLen := metrics.Len()
-				for i := 0; i < mLen; i++ {
+				for i := range mLen {
 					metric := metrics.At(i)
 					newMetric := transform.MetricIncludeFilter.extractMatchedMetric(metric)
 					if newMetric == (pmetric.Metric{}) {
@@ -445,7 +445,7 @@ func combine(transform internalTransform, metrics pmetric.MetricSlice) pmetric.M
 // canBeCombined must be called before.
 func groupMetrics(metrics pmetric.MetricSlice, aggType aggregateutil.AggregationType, to pmetric.Metric) {
 	ag := aggregateutil.AggGroups{}
-	for i := 0; i < metrics.Len(); i++ {
+	for i := range metrics.Len() {
 		aggregateutil.GroupDataPoints(metrics.At(i), &ag)
 	}
 	aggregateutil.MergeDataPoints(to, aggType, ag)
@@ -477,35 +477,35 @@ func rangeDataPointAttributes(metric pmetric.Metric, f func(pcommon.Map) bool) {
 	//exhaustive:enforce
 	switch metric.Type() {
 	case pmetric.MetricTypeGauge:
-		for i := 0; i < metric.Gauge().DataPoints().Len(); i++ {
+		for i := range metric.Gauge().DataPoints().Len() {
 			dp := metric.Gauge().DataPoints().At(i)
 			if !f(dp.Attributes()) {
 				return
 			}
 		}
 	case pmetric.MetricTypeSum:
-		for i := 0; i < metric.Sum().DataPoints().Len(); i++ {
+		for i := range metric.Sum().DataPoints().Len() {
 			dp := metric.Sum().DataPoints().At(i)
 			if !f(dp.Attributes()) {
 				return
 			}
 		}
 	case pmetric.MetricTypeHistogram:
-		for i := 0; i < metric.Histogram().DataPoints().Len(); i++ {
+		for i := range metric.Histogram().DataPoints().Len() {
 			dp := metric.Histogram().DataPoints().At(i)
 			if !f(dp.Attributes()) {
 				return
 			}
 		}
 	case pmetric.MetricTypeExponentialHistogram:
-		for i := 0; i < metric.ExponentialHistogram().DataPoints().Len(); i++ {
+		for i := range metric.ExponentialHistogram().DataPoints().Len() {
 			dp := metric.ExponentialHistogram().DataPoints().At(i)
 			if !f(dp.Attributes()) {
 				return
 			}
 		}
	case pmetric.MetricTypeSummary:
-		for i := 0; i < metric.Summary().DataPoints().Len(); i++ {
+		for i := range metric.Summary().DataPoints().Len() {
 			dp := metric.Summary().DataPoints().At(i)
 			if !f(dp.Attributes()) {
 				return
diff --git a/processor/metricstransformprocessor/operation_scale_value.go b/processor/metricstransformprocessor/operation_scale_value.go
index a5f44bff9113d..16339dfee1432 100644
--- a/processor/metricstransformprocessor/operation_scale_value.go
+++ b/processor/metricstransformprocessor/operation_scale_value.go
@@ -28,7 +28,7 @@ func scaleValueOp(metric pmetric.Metric, op internalOperation, f internalFilter)
 		return
 	}
 
-	for i := 0; i < dps.Len(); i++ {
+	for i := range dps.Len() {
 		dp := dps.At(i)
 		if !f.matchAttrs(dp.Attributes()) {
 			continue
@@ -45,7 +45,7 @@ func scaleValueOp(metric pmetric.Metric, op internalOperation, f internalFilter)
 
 func scaleHistogramOp(metric pmetric.Metric, op internalOperation, f internalFilter) {
 	dps := metric.Histogram().DataPoints()
-	for i := 0; i < dps.Len(); i++ {
+	for i := range dps.Len() {
 		dp := dps.At(i)
 		if !f.matchAttrs(dp.Attributes()) {
 			continue
@@ -71,7 +71,7 @@ func scaleHistogramOp(metric pmetric.Metric, op internalOperation, f internalFil
 
 func scaleExpHistogramOp(metric pmetric.Metric, op internalOperation, f internalFilter) {
 	dps := metric.ExponentialHistogram().DataPoints()
-	for i := 0; i < dps.Len(); i++ {
+	for i := range dps.Len() {
 		dp := dps.At(i)
 		if !f.matchAttrs(dp.Attributes()) {
 			continue
diff --git a/processor/metricstransformprocessor/operation_toggle_scalar_datatype.go b/processor/metricstransformprocessor/operation_toggle_scalar_datatype.go
index 4336139ba96ae..73c95df7fc9c3 100644
--- a/processor/metricstransformprocessor/operation_toggle_scalar_datatype.go
+++ b/processor/metricstransformprocessor/operation_toggle_scalar_datatype.go
@@ -20,7 +20,7 @@ func toggleScalarDataTypeOp(metric pmetric.Metric, f internalFilter) {
 		return
 	}
 
-	for i := 0; i < dps.Len(); i++ {
+	for i := range dps.Len() {
 		dp := dps.At(i)
 		if !f.matchAttrs(dp.Attributes()) {
 			continue
diff --git a/processor/probabilisticsamplerprocessor/fnvhasher_test.go b/processor/probabilisticsamplerprocessor/fnvhasher_test.go
index 5f69d3fcb0e03..d4a68247ebd42 100644
--- a/processor/probabilisticsamplerprocessor/fnvhasher_test.go
+++ b/processor/probabilisticsamplerprocessor/fnvhasher_test.go
@@ -10,7 +10,7 @@ import (
 func BenchmarkSeedConversion(b *testing.B) {
 	val := uint32(0x3024001) // Just a random 32 bit int
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		i32tob(val)
 	}
 }
diff --git a/processor/probabilisticsamplerprocessor/logsprocessor_test.go b/processor/probabilisticsamplerprocessor/logsprocessor_test.go
index 510ff038c92e2..c86162b6f4bcf 100644
--- a/processor/probabilisticsamplerprocessor/logsprocessor_test.go
+++ b/processor/probabilisticsamplerprocessor/logsprocessor_test.go
@@ -175,7 +175,7 @@ func TestLogsSampling(t *testing.T) {
 			require.NoError(t, err)
 			logs := plog.NewLogs()
 			lr := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords()
-			for i := 0; i < 100; i++ {
+			for i := range 100 {
 				record := lr.AppendEmpty()
 				record.SetTimestamp(pcommon.Timestamp(time.Unix(1649400860, 0).Unix()))
 				record.SetSeverityNumber(plog.SeverityNumberDebug)
diff --git a/processor/probabilisticsamplerprocessor/tracesprocessor_test.go b/processor/probabilisticsamplerprocessor/tracesprocessor_test.go
index fc5f880c052f6..754eebbdf2884 100644
--- a/processor/probabilisticsamplerprocessor/tracesprocessor_test.go
+++ b/processor/probabilisticsamplerprocessor/tracesprocessor_test.go
@@ -1133,10 +1133,10 @@ func initSpanWithAttribute(key string, value pcommon.Value, dest ptrace.Span) {
 func genRandomTestData(numBatches, numTracesPerBatch int, serviceName string, resourceSpanCount int) (tdd []ptrace.Traces) {
 	r := rand.New(rand.NewPCG(123, 456))
 	var traceBatches []ptrace.Traces
-	for i := 0; i < numBatches; i++ {
+	for range numBatches {
 		traces := ptrace.NewTraces()
 		traces.ResourceSpans().EnsureCapacity(resourceSpanCount)
-		for j := 0; j < resourceSpanCount; j++ {
+		for range resourceSpanCount {
 			rs := traces.ResourceSpans().AppendEmpty()
 			rs.Resource().Attributes().PutStr("service.name", serviceName)
 			rs.Resource().Attributes().PutBool("bool", true)
@@ -1145,7 +1145,7 @@ func genRandomTestData(numBatches, numTracesPerBatch int, serviceName string, re
 			ils := rs.ScopeSpans().AppendEmpty()
 			ils.Spans().EnsureCapacity(numTracesPerBatch)
 
-			for k := 0; k < numTracesPerBatch; k++ {
+			for range numTracesPerBatch {
 				span := ils.Spans().AppendEmpty()
 				span.SetTraceID(idutils.UInt64ToTraceID(r.Uint64(), r.Uint64()))
 				span.SetSpanID(idutils.UInt64ToSpanID(r.Uint64()))
@@ -1195,15 +1195,15 @@ func newAssertTraces(t *testing.T, name string) *assertTraces {
 func (a *assertTraces) onSampledData(sampled []ptrace.Traces) {
 	for _, td := range sampled {
 		rspans := td.ResourceSpans()
-		for i := 0; i < rspans.Len(); i++ {
+		for i := range rspans.Len() {
 			rspan := rspans.At(i)
 			ilss := rspan.ScopeSpans()
-			for j := 0; j < ilss.Len(); j++ {
+			for j := range ilss.Len() {
 				ils := ilss.At(j)
 				if svcNameAttr, _ := rspan.Resource().Attributes().Get("service.name"); svcNameAttr.Str() != a.testName {
 					continue
 				}
-				for k := 0; k < ils.Spans().Len(); k++ {
+				for k := range ils.Spans().Len() {
 					a.spanCount++
 					span := ils.Spans().At(k)
 					key := span.TraceID()
diff --git a/processor/redactionprocessor/processor.go b/processor/redactionprocessor/processor.go
index 4998d106a342c..7dc801df08860 100644
--- a/processor/redactionprocessor/processor.go
+++ b/processor/redactionprocessor/processor.go
@@ -63,7 +63,7 @@ func newRedaction(ctx context.Context, config *Config, logger *zap.Logger) (*red
 // processTraces implements ProcessMetricsFunc. It processes the incoming data
 // and returns the data to be sent to the next component
 func (s *redaction) processTraces(ctx context.Context, batch ptrace.Traces) (ptrace.Traces, error) {
-	for i := 0; i < batch.ResourceSpans().Len(); i++ {
+	for i := range batch.ResourceSpans().Len() {
 		rs := batch.ResourceSpans().At(i)
 		s.processResourceSpan(ctx, rs)
 	}
@@ -71,7 +71,7 @@ func (s *redaction) processTraces(ctx context.Context, batch ptrace.Traces) (ptr
 }
 
 func (s *redaction) processLogs(ctx context.Context, logs plog.Logs) (plog.Logs, error) {
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
+	for i := range logs.ResourceLogs().Len() {
 		rl := logs.ResourceLogs().At(i)
 		s.processResourceLog(ctx, rl)
 	}
@@ -79,7 +79,7 @@ func (s *redaction) processLogs(ctx context.Context, logs plog.Logs) (plog.Logs,
 }
 
 func (s *redaction) processMetrics(ctx context.Context, metrics pmetric.Metrics) (pmetric.Metrics, error) {
-	for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
+	for i := range metrics.ResourceMetrics().Len() {
 		rm := metrics.ResourceMetrics().At(i)
 		s.processResourceMetric(ctx, rm)
 	}
@@ -94,9 +94,9 @@ func (s *redaction) processResourceSpan(ctx context.Context, rs ptrace.ResourceS
 	// Attributes can be part of a resource span
 	s.processAttrs(ctx, rsAttrs)
 
-	for j := 0; j < rs.ScopeSpans().Len(); j++ {
+	for j := range rs.ScopeSpans().Len() {
 		ils := rs.ScopeSpans().At(j)
-		for k := 0; k < ils.Spans().Len(); k++ {
+		for k := range ils.Spans().Len() {
 			span := ils.Spans().At(k)
 			spanAttrs := span.Attributes()
 
@@ -113,9 +113,9 @@ func (s *redaction) processResourceLog(ctx context.Context, rl plog.ResourceLogs
 	s.processAttrs(ctx, rsAttrs)
 
-	for j := 0; j < rl.ScopeLogs().Len(); j++ {
+	for j := range rl.ScopeLogs().Len() {
 		ils := rl.ScopeLogs().At(j)
-		for k := 0; k < ils.LogRecords().Len(); k++ {
+		for k := range ils.LogRecords().Len() {
 			log := ils.LogRecords().At(k)
 			s.processAttrs(ctx, log.Attributes())
 		}
@@ -127,34 +127,34 @@ func (s *redaction) processResourceMetric(ctx context.Context, rm pmetric.Resour
 	s.processAttrs(ctx, rsAttrs)
 
-	for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+	for j := range rm.ScopeMetrics().Len() {
 		ils := rm.ScopeMetrics().At(j)
-		for k := 0; k < ils.Metrics().Len(); k++ {
+		for k := range ils.Metrics().Len() {
 			metric := ils.Metrics().At(k)
 			switch metric.Type() {
 			case pmetric.MetricTypeGauge:
 				dps := metric.Gauge().DataPoints()
-				for i := 0; i < dps.Len(); i++ {
+				for i := range dps.Len() {
 					s.processAttrs(ctx, dps.At(i).Attributes())
 				}
 			case pmetric.MetricTypeSum:
 				dps := metric.Sum().DataPoints()
-				for i := 0; i < dps.Len(); i++ {
+				for i := range dps.Len() {
 					s.processAttrs(ctx, dps.At(i).Attributes())
 				}
 			case pmetric.MetricTypeHistogram:
 				dps := metric.Histogram().DataPoints()
-				for i := 0; i < dps.Len(); i++ {
+				for i := range dps.Len() {
 					s.processAttrs(ctx, dps.At(i).Attributes())
 				}
 			case pmetric.MetricTypeExponentialHistogram:
 				dps := metric.ExponentialHistogram().DataPoints()
-				for i := 0; i < dps.Len(); i++ {
+				for i := range dps.Len() {
 					s.processAttrs(ctx, dps.At(i).Attributes())
 				}
 			case pmetric.MetricTypeSummary:
 				dps := metric.Summary().DataPoints()
-				for i := 0; i < dps.Len(); i++ {
+				for i := range dps.Len() {
 					s.processAttrs(ctx, dps.At(i).Attributes())
 				}
 			case pmetric.MetricTypeEmpty:
diff --git a/processor/redactionprocessor/processor_test.go b/processor/redactionprocessor/processor_test.go
index 55fd8a7471ae9..30d1f990fa962 100644
--- a/processor/redactionprocessor/processor_test.go
+++ b/processor/redactionprocessor/processor_test.go
@@ -744,7 +744,7 @@ func BenchmarkRedactSummaryDebug(b *testing.B) {
 	ctx := context.Background()
 	processor, _ := newRedaction(ctx, config, zaptest.NewLogger(b))
 
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		runBenchmark(allowed, redacted, masked, ignored, processor)
 	}
 }
@@ -775,7 +775,7 @@ func BenchmarkMaskSummaryDebug(b *testing.B) {
 	ctx := context.Background()
 	processor, _ := newRedaction(ctx, config, zaptest.NewLogger(b))
 
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		runBenchmark(allowed, nil, masked, ignored, processor)
 	}
 }
diff --git a/processor/remotetapprocessor/processor_test.go b/processor/remotetapprocessor/processor_test.go
index 50c59ad34a13f..cbc5641358ee9 100644
--- a/processor/remotetapprocessor/processor_test.go
+++ b/processor/remotetapprocessor/processor_test.go
@@ -50,7 +50,7 @@ func TestConsumeMetrics(t *testing.T) {
 			}
 		}()
 
-		for i := 0; i < c.limit*2; i++ {
+		for range c.limit * 2 {
 			// send metric to chan c.limit*2 per sec.
 			metric2, err := processor.ConsumeMetrics(context.Background(), metric)
 			assert.NoError(t, err)
@@ -99,7 +99,7 @@ func TestConsumeLogs(t *testing.T) {
 		}()
 
 		// send log to chan c.limit*2 per sec.
-		for i := 0; i < c.limit*2; i++ {
+		for range c.limit * 2 {
 			log2, err := processor.ConsumeLogs(context.Background(), log)
 			assert.NoError(t, err)
 			assert.Equal(t, log, log2)
@@ -147,7 +147,7 @@ func TestConsumeTraces(t *testing.T) {
 			}
 		}()
 
-		for i := 0; i < c.limit*2; i++ {
+		for range c.limit * 2 {
 			// send trace to chan c.limit*2 per sec.
 			trace2, err := processor.ConsumeTraces(context.Background(), trace)
 			assert.NoError(t, err)
diff --git a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go
index f07702adac8bb..cdad20cf90b94 100644
--- a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go
+++ b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go
@@ -217,7 +217,7 @@ func TestDetectResource_Parallel(t *testing.T) {
 	// call p.Get multiple times
 	wg := &sync.WaitGroup{}
 	wg.Add(iterations)
-	for i := 0; i < iterations; i++ {
+	for range iterations {
 		go func() {
 			defer wg.Done()
 			detected, _, err := p.Get(context.Background(), http.DefaultClient)
func (rdp *resourceDetectionProcessor) processTraces(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) { rs := td.ResourceSpans() - for i := 0; i < rs.Len(); i++ { + for i := range rs.Len() { rss := rs.At(i) rss.SetSchemaUrl(internal.MergeSchemaURL(rss.SchemaUrl(), rdp.schemaURL)) res := rss.Resource() @@ -50,7 +50,7 @@ func (rdp *resourceDetectionProcessor) processTraces(_ context.Context, td ptrac // processMetrics implements the ProcessMetricsFunc type. func (rdp *resourceDetectionProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rm := md.ResourceMetrics() - for i := 0; i < rm.Len(); i++ { + for i := range rm.Len() { rss := rm.At(i) rss.SetSchemaUrl(internal.MergeSchemaURL(rss.SchemaUrl(), rdp.schemaURL)) res := rss.Resource() @@ -62,7 +62,7 @@ func (rdp *resourceDetectionProcessor) processMetrics(_ context.Context, md pmet // processLogs implements the ProcessLogsFunc type. func (rdp *resourceDetectionProcessor) processLogs(_ context.Context, ld plog.Logs) (plog.Logs, error) { rl := ld.ResourceLogs() - for i := 0; i < rl.Len(); i++ { + for i := range rl.Len() { rss := rl.At(i) rss.SetSchemaUrl(internal.MergeSchemaURL(rss.SchemaUrl(), rdp.schemaURL)) res := rss.Resource() @@ -74,7 +74,7 @@ func (rdp *resourceDetectionProcessor) processLogs(_ context.Context, ld plog.Lo // processProfiles implements the ProcessProfilesFunc type. func (rdp *resourceDetectionProcessor) processProfiles(_ context.Context, ld pprofile.Profiles) (pprofile.Profiles, error) { rl := ld.ResourceProfiles() - for i := 0; i < rl.Len(); i++ { + for i := range rl.Len() { rss := rl.At(i) rss.SetSchemaUrl(internal.MergeSchemaURL(rss.SchemaUrl(), rdp.schemaURL)) res := rss.Resource() diff --git a/processor/resourcedetectionprocessor/resourcedetection_processor_test.go b/processor/resourcedetectionprocessor/resourcedetection_processor_test.go index 9931577f33388..da5e2bdad19f5 100644 --- a/processor/resourcedetectionprocessor/resourcedetection_processor_test.go +++ b/processor/resourcedetectionprocessor/resourcedetection_processor_test.go @@ -302,7 +302,7 @@ func benchmarkConsumeTraces(b *testing.B, cfg *Config) { processor, _ := factory.CreateTraces(context.Background(), processortest.NewNopSettings(), cfg, sink) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { // TODO use testbed.PerfTestDataProvider here once that includes resources assert.NoError(b, processor.ConsumeTraces(context.Background(), ptrace.NewTraces())) } @@ -324,7 +324,7 @@ func benchmarkConsumeMetrics(b *testing.B, cfg *Config) { processor, _ := factory.CreateMetrics(context.Background(), processortest.NewNopSettings(), cfg, sink) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { // TODO use testbed.PerfTestDataProvider here once that includes resources assert.NoError(b, processor.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) } @@ -346,7 +346,7 @@ func benchmarkConsumeLogs(b *testing.B, cfg *Config) { processor, _ := factory.CreateLogs(context.Background(), processortest.NewNopSettings(), cfg, sink) b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { // TODO use testbed.PerfTestDataProvider here once that includes resources assert.NoError(b, processor.ConsumeLogs(context.Background(), plog.NewLogs())) } @@ -368,7 +368,7 @@ func benchmarkConsumeProfiles(b *testing.B, cfg *Config) { processor, _ := factory.(xprocessor.Factory).CreateProfiles(context.Background(), processortest.NewNopSettings(), cfg, sink) b.ResetTimer() - for n := 0; n < b.N; n++ { + for 
range b.N { // TODO use testbed.PerfTestDataProvider here once that includes resources assert.NoError(b, processor.ConsumeProfiles(context.Background(), pprofile.NewProfiles())) } diff --git a/processor/resourceprocessor/resource_processor.go b/processor/resourceprocessor/resource_processor.go index a7588418415f0..4abd81850a717 100644 --- a/processor/resourceprocessor/resource_processor.go +++ b/processor/resourceprocessor/resource_processor.go @@ -21,7 +21,7 @@ type resourceProcessor struct { func (rp *resourceProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { rp.attrProc.Process(ctx, rp.logger, rss.At(i).Resource().Attributes()) } return td, nil @@ -29,7 +29,7 @@ func (rp *resourceProcessor) processTraces(ctx context.Context, td ptrace.Traces func (rp *resourceProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { rp.attrProc.Process(ctx, rp.logger, rms.At(i).Resource().Attributes()) } return md, nil @@ -37,7 +37,7 @@ func (rp *resourceProcessor) processMetrics(ctx context.Context, md pmetric.Metr func (rp *resourceProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { rls := ld.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { rp.attrProc.Process(ctx, rp.logger, rls.At(i).Resource().Attributes()) } return ld, nil diff --git a/processor/routingprocessor/logs.go b/processor/routingprocessor/logs.go index 2a613e1cfdaa3..5fc719909a47e 100644 --- a/processor/routingprocessor/logs.go +++ b/processor/routingprocessor/logs.go @@ -106,7 +106,7 @@ func (p *logProcessor) route(ctx context.Context, l plog.Logs) error { groups := map[string]logsGroup{} var errs error - for i := 0; i < l.ResourceLogs().Len(); i++ { + for i := range l.ResourceLogs().Len() { rlogs := l.ResourceLogs().At(i) ltx := ottllog.NewTransformContext( plog.NewLogRecord(), @@ -166,7 +166,7 @@ func (p *logProcessor) group( func (p *logProcessor) recordNonRoutedResourceLogs(ctx context.Context, routingKey string, rlogs plog.ResourceLogs) { logRecordsCount := 0 sl := rlogs.ScopeLogs() - for j := 0; j < sl.Len(); j++ { + for j := range sl.Len() { logRecordsCount += sl.At(j).LogRecords().Len() } diff --git a/processor/routingprocessor/metrics.go b/processor/routingprocessor/metrics.go index a82203205ac9b..32fe856e973b4 100644 --- a/processor/routingprocessor/metrics.go +++ b/processor/routingprocessor/metrics.go @@ -102,7 +102,7 @@ func (p *metricsProcessor) route(ctx context.Context, tm pmetric.Metrics) error var errs error - for i := 0; i < tm.ResourceMetrics().Len(); i++ { + for i := range tm.ResourceMetrics().Len() { rmetrics := tm.ResourceMetrics().At(i) mtx := ottldatapoint.NewTransformContext( nil, @@ -165,7 +165,7 @@ func (p *metricsProcessor) group( func (p *metricsProcessor) recordNonRoutedForResourceMetrics(ctx context.Context, routingKey string, rm pmetric.ResourceMetrics) { metricPointsCount := 0 sm := rm.ScopeMetrics() - for j := 0; j < sm.Len(); j++ { + for j := range sm.Len() { metricPointsCount += sm.At(j).Metrics().Len() } diff --git a/processor/routingprocessor/metrics_test.go b/processor/routingprocessor/metrics_test.go index b1bf73f89c7fb..0b72248e731ef 100644 --- a/processor/routingprocessor/metrics_test.go +++ b/processor/routingprocessor/metrics_test.go @@ -322,7 +322,7 @@ func 
Benchmark_MetricsRouting_ResourceAttribute(b *testing.B) { assert.NoError(b, exp.Start(context.Background(), host)) - for i := 0; i < b.N; i++ { + for range b.N { m := pmetric.NewMetrics() rm := m.ResourceMetrics().AppendEmpty() diff --git a/processor/routingprocessor/traces.go b/processor/routingprocessor/traces.go index 7c00e0cf65f3d..6118d9f7d517c 100644 --- a/processor/routingprocessor/traces.go +++ b/processor/routingprocessor/traces.go @@ -103,7 +103,7 @@ func (p *tracesProcessor) route(ctx context.Context, t ptrace.Traces) error { groups := map[string]spanGroup{} var errs error - for i := 0; i < t.ResourceSpans().Len(); i++ { + for i := range t.ResourceSpans().Len() { rspans := t.ResourceSpans().At(i) stx := ottlspan.NewTransformContext( ptrace.NewSpan(), @@ -159,7 +159,7 @@ func (p *tracesProcessor) group(key string, groups map[string]spanGroup, exporte func (p *tracesProcessor) recordNonRoutedResourceSpans(ctx context.Context, routingKey string, rspans ptrace.ResourceSpans) { spanCount := 0 ilss := rspans.ScopeSpans() - for j := 0; j < ilss.Len(); j++ { + for j := range ilss.Len() { spanCount += ilss.At(j).Spans().Len() } diff --git a/processor/schemaprocessor/internal/changelist/changelist.go b/processor/schemaprocessor/internal/changelist/changelist.go index 2ed2539893409..ebb168a2b4ed6 100644 --- a/processor/schemaprocessor/internal/changelist/changelist.go +++ b/processor/schemaprocessor/internal/changelist/changelist.go @@ -21,7 +21,7 @@ type ChangeList struct { } func (c ChangeList) Do(ss migrate.StateSelector, signal any) error { - for i := 0; i < len(c.Migrators); i++ { + for i := range len(c.Migrators) { var migrator migrate.Migrator // todo(ankit) in go1.23 switch to reversed iterators for this if ss == migrate.StateSelectorApply { diff --git a/processor/schemaprocessor/internal/fixture/parallel.go b/processor/schemaprocessor/internal/fixture/parallel.go index c7c0eb5233de1..7291d70cfd41e 100644 --- a/processor/schemaprocessor/internal/fixture/parallel.go +++ b/processor/schemaprocessor/internal/fixture/parallel.go @@ -34,7 +34,7 @@ func ParallelRaceCompute(tb testing.TB, count int, fn func() error) { start = make(chan struct{}) wg sync.WaitGroup ) - for i := 0; i < count; i++ { + for range count { wg.Add(1) go func() { defer wg.Done() diff --git a/processor/schemaprocessor/internal/transformer/attributes_operators.go b/processor/schemaprocessor/internal/transformer/attributes_operators.go index 3cce6ad877ffc..5c2cacfdca6e6 100644 --- a/processor/schemaprocessor/internal/transformer/attributes_operators.go +++ b/processor/schemaprocessor/internal/transformer/attributes_operators.go @@ -29,35 +29,35 @@ func (o MetricAttributes) Do(ss migrate.StateSelector, metric pmetric.Metric) er var datam alias.Attributed switch metric.Type() { case pmetric.MetricTypeExponentialHistogram: - for dp := 0; dp < metric.ExponentialHistogram().DataPoints().Len(); dp++ { + for dp := range metric.ExponentialHistogram().DataPoints().Len() { datam = metric.ExponentialHistogram().DataPoints().At(dp) if err := o.AttributeChange.Do(ss, datam.Attributes()); err != nil { return err } } case pmetric.MetricTypeHistogram: - for dp := 0; dp < metric.Histogram().DataPoints().Len(); dp++ { + for dp := range metric.Histogram().DataPoints().Len() { datam = metric.Histogram().DataPoints().At(dp) if err := o.AttributeChange.Do(ss, datam.Attributes()); err != nil { return err } } case pmetric.MetricTypeGauge: - for dp := 0; dp < metric.Gauge().DataPoints().Len(); dp++ { + for dp := range 
metric.Gauge().DataPoints().Len() { datam = metric.Gauge().DataPoints().At(dp) if err := o.AttributeChange.Do(ss, datam.Attributes()); err != nil { return err } } case pmetric.MetricTypeSum: - for dp := 0; dp < metric.Sum().DataPoints().Len(); dp++ { + for dp := range metric.Sum().DataPoints().Len() { datam = metric.Sum().DataPoints().At(dp) if err := o.AttributeChange.Do(ss, datam.Attributes()); err != nil { return err } } case pmetric.MetricTypeSummary: - for dp := 0; dp < metric.Summary().DataPoints().Len(); dp++ { + for dp := range metric.Summary().DataPoints().Len() { datam = metric.Summary().DataPoints().At(dp) if err := o.AttributeChange.Do(ss, datam.Attributes()); err != nil { return err diff --git a/processor/schemaprocessor/internal/transformer/conditional_attributes.go b/processor/schemaprocessor/internal/transformer/conditional_attributes.go index f7f0e874d5289..ba72d05e45294 100644 --- a/processor/schemaprocessor/internal/transformer/conditional_attributes.go +++ b/processor/schemaprocessor/internal/transformer/conditional_attributes.go @@ -26,35 +26,35 @@ func (o MetricDataPointAttributes) Do(ss migrate.StateSelector, metric pmetric.M var datam alias.Attributed switch metric.Type() { case pmetric.MetricTypeExponentialHistogram: - for dp := 0; dp < metric.ExponentialHistogram().DataPoints().Len(); dp++ { + for dp := range metric.ExponentialHistogram().DataPoints().Len() { datam = metric.ExponentialHistogram().DataPoints().At(dp) if err := o.ConditionalAttributeChange.Do(ss, datam.Attributes(), metric.Name()); err != nil { return err } } case pmetric.MetricTypeHistogram: - for dp := 0; dp < metric.Histogram().DataPoints().Len(); dp++ { + for dp := range metric.Histogram().DataPoints().Len() { datam = metric.Histogram().DataPoints().At(dp) if err := o.ConditionalAttributeChange.Do(ss, datam.Attributes(), metric.Name()); err != nil { return err } } case pmetric.MetricTypeGauge: - for dp := 0; dp < metric.Gauge().DataPoints().Len(); dp++ { + for dp := range metric.Gauge().DataPoints().Len() { datam = metric.Gauge().DataPoints().At(dp) if err := o.ConditionalAttributeChange.Do(ss, datam.Attributes(), metric.Name()); err != nil { return err } } case pmetric.MetricTypeSum: - for dp := 0; dp < metric.Sum().DataPoints().Len(); dp++ { + for dp := range metric.Sum().DataPoints().Len() { datam = metric.Sum().DataPoints().At(dp) if err := o.ConditionalAttributeChange.Do(ss, datam.Attributes(), metric.Name()); err != nil { return err } } case pmetric.MetricTypeSummary: - for dp := 0; dp < metric.Summary().DataPoints().Len(); dp++ { + for dp := range metric.Summary().DataPoints().Len() { datam = metric.Summary().DataPoints().At(dp) if err := o.ConditionalAttributeChange.Do(ss, datam.Attributes(), metric.Name()); err != nil { return err diff --git a/processor/schemaprocessor/internal/transformer/multi_conditional_attributes.go b/processor/schemaprocessor/internal/transformer/multi_conditional_attributes.go index 8ebbb429739b3..8a694318d7cc5 100644 --- a/processor/schemaprocessor/internal/transformer/multi_conditional_attributes.go +++ b/processor/schemaprocessor/internal/transformer/multi_conditional_attributes.go @@ -18,7 +18,7 @@ type SpanEventConditionalAttributes struct { func (o SpanEventConditionalAttributes) IsMigrator() {} func (o SpanEventConditionalAttributes) Do(ss migrate.StateSelector, span ptrace.Span) error { - for e := 0; e < span.Events().Len(); e++ { + for e := range span.Events().Len() { event := span.Events().At(e) if err := o.MultiConditionalAttributeSet.Do(ss, 
event.Attributes(), map[string]string{ diff --git a/processor/schemaprocessor/internal/transformer/signal_name.go b/processor/schemaprocessor/internal/transformer/signal_name.go index 7d11b8cac60e0..0f565c6f956cc 100644 --- a/processor/schemaprocessor/internal/transformer/signal_name.go +++ b/processor/schemaprocessor/internal/transformer/signal_name.go @@ -19,7 +19,7 @@ type SpanEventSignalNameChange struct { func (c SpanEventSignalNameChange) IsMigrator() {} func (c SpanEventSignalNameChange) Do(ss migrate.StateSelector, span ptrace.Span) error { - for e := 0; e < span.Events().Len(); e++ { + for e := range span.Events().Len() { event := span.Events().At(e) c.SignalNameChange.Do(ss, event) } diff --git a/processor/schemaprocessor/internal/translation/translation.go b/processor/schemaprocessor/internal/translation/translation.go index 71aeabeab6255..4cff745d7e54f 100644 --- a/processor/schemaprocessor/internal/translation/translation.go +++ b/processor/schemaprocessor/internal/translation/translation.go @@ -180,7 +180,7 @@ func (t *translator) ApplyScopeLogChanges(scopeLogs plog.ScopeLogs, inSchemaURL return nil } for rev, more := it(); more; rev, more = it() { - for l := 0; l < scopeLogs.LogRecords().Len(); l++ { + for l := range scopeLogs.LogRecords().Len() { log := scopeLogs.LogRecords().At(l) switch status { case Update: @@ -215,7 +215,7 @@ func (t *translator) ApplyScopeSpanChanges(scopeSpans ptrace.ScopeSpans, inSchem } it, status := t.iterator(ver) for rev, more := it(); more; rev, more = it() { - for i := 0; i < scopeSpans.Spans().Len(); i++ { + for i := range scopeSpans.Spans().Len() { span := scopeSpans.Spans().At(i) switch status { case Update: @@ -227,7 +227,7 @@ func (t *translator) ApplyScopeSpanChanges(scopeSpans ptrace.ScopeSpans, inSchem if err != nil { return err } - for e := 0; e < span.Events().Len(); e++ { + for e := range span.Events().Len() { event := span.Events().At(e) err = rev.all.Apply(event) if err != nil { @@ -241,7 +241,7 @@ func (t *translator) ApplyScopeSpanChanges(scopeSpans ptrace.ScopeSpans, inSchem if err = rev.spanEvents.Rollback(span); err != nil { return err } - for e := 0; e < span.Events().Len(); e++ { + for e := range span.Events().Len() { event := span.Events().At(e) err = rev.all.Rollback(event) if err != nil { @@ -270,7 +270,7 @@ func (t *translator) ApplyScopeMetricChanges(scopeMetrics pmetric.ScopeMetrics, } it, status := t.iterator(ver) for rev, more := it(); more; rev, more = it() { - for i := 0; i < scopeMetrics.Metrics().Len(); i++ { + for i := range scopeMetrics.Metrics().Len() { metric := scopeMetrics.Metrics().At(i) switch status { case Update: diff --git a/processor/schemaprocessor/internal/translation/translation_helpers_test.go b/processor/schemaprocessor/internal/translation/translation_helpers_test.go index 3f7d1cceaa367..43c98ebf8014d 100644 --- a/processor/schemaprocessor/internal/translation/translation_helpers_test.go +++ b/processor/schemaprocessor/internal/translation/translation_helpers_test.go @@ -47,7 +47,7 @@ func NewExampleLogs(tb testing.TB, at Version) plog.Logs { logs := plog.NewLogs() - for i := 0; i < 10; i++ { + for range 10 { log := logs.ResourceLogs().AppendEmpty() log.SetSchemaUrl(schemaURL) @@ -100,14 +100,14 @@ func NewExampleMetrics(tb testing.TB, at Version) pmetric.Metrics { schemaURL := fmt.Sprint("https://example.com/", at.String()) metrics := pmetric.NewMetrics() - for i := 0; i < 10; i++ { + for range 10 { metric := metrics.ResourceMetrics().AppendEmpty() metric.SetSchemaUrl(schemaURL) sMetric := 
metric.ScopeMetrics().AppendEmpty() sMetric.SetSchemaUrl(schemaURL) - for j := 0; j < 5; j++ { + for range 5 { switch at { case Version{1, 7, 0}, Version{1, 5, 0}: metric.Resource().Attributes().PutStr("test.name", tb.Name()) @@ -253,7 +253,7 @@ func NewExampleSpans(tb testing.TB, at Version) ptrace.Traces { schemaURL := fmt.Sprint("https://example.com/", at.String()) traces := ptrace.NewTraces() - for i := 0; i < 10; i++ { + for range 10 { traces := traces.ResourceSpans().AppendEmpty() traces.SetSchemaUrl(schemaURL) diff --git a/processor/schemaprocessor/internal/translation/translation_race_test.go b/processor/schemaprocessor/internal/translation/translation_race_test.go index bb971441e7ede..fbdf6884b5218 100644 --- a/processor/schemaprocessor/internal/translation/translation_race_test.go +++ b/processor/schemaprocessor/internal/translation/translation_race_test.go @@ -23,15 +23,15 @@ func TestRaceTranslationSpanChanges(t *testing.T) { require.NoError(t, err, "Must not error when creating translator") fixture.ParallelRaceCompute(t, 10, func() error { - for i := 0; i < 10; i++ { + for range 10 { v := &Version{1, 0, 0} spans := NewExampleSpans(t, *v) - for i := 0; i < spans.ResourceSpans().Len(); i++ { + for i := range spans.ResourceSpans().Len() { rSpan := spans.ResourceSpans().At(i) if err := tn.ApplyAllResourceChanges(rSpan, rSpan.SchemaUrl()); err != nil { return err } - for j := 0; j < rSpan.ScopeSpans().Len(); j++ { + for j := range rSpan.ScopeSpans().Len() { span := rSpan.ScopeSpans().At(j) if err := tn.ApplyScopeSpanChanges(span, span.SchemaUrl()); err != nil { return err @@ -54,15 +54,15 @@ func TestRaceTranslationMetricChanges(t *testing.T) { require.NoError(t, err, "Must not error when creating translator") fixture.ParallelRaceCompute(t, 10, func() error { - for i := 0; i < 10; i++ { + for range 10 { spans := NewExampleSpans(t, Version{1, 0, 0}) - for i := 0; i < spans.ResourceSpans().Len(); i++ { + for i := range spans.ResourceSpans().Len() { rSpan := spans.ResourceSpans().At(i) err := tn.ApplyAllResourceChanges(rSpan, rSpan.SchemaUrl()) if err != nil { return err } - for j := 0; j < rSpan.ScopeSpans().Len(); j++ { + for j := range rSpan.ScopeSpans().Len() { span := rSpan.ScopeSpans().At(j) err := tn.ApplyScopeSpanChanges(span, span.SchemaUrl()) if err != nil { @@ -86,15 +86,15 @@ func TestRaceTranslationLogChanges(t *testing.T) { require.NoError(t, err, "Must not error when creating translator") fixture.ParallelRaceCompute(t, 10, func() error { - for i := 0; i < 10; i++ { + for range 10 { metrics := NewExampleMetrics(t, Version{1, 0, 0}) - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rMetrics := metrics.ResourceMetrics().At(i) err := tn.ApplyAllResourceChanges(rMetrics, rMetrics.SchemaUrl()) if err != nil { return err } - for j := 0; j < rMetrics.ScopeMetrics().Len(); j++ { + for j := range rMetrics.ScopeMetrics().Len() { metric := rMetrics.ScopeMetrics().At(j) err := tn.ApplyScopeMetricChanges(metric, metric.SchemaUrl()) if err != nil { diff --git a/processor/schemaprocessor/internal/translation/translation_test.go b/processor/schemaprocessor/internal/translation/translation_test.go index 5dcc9f1c5638f..ebf0670bd8b06 100644 --- a/processor/schemaprocessor/internal/translation/translation_test.go +++ b/processor/schemaprocessor/internal/translation/translation_test.go @@ -194,11 +194,11 @@ func TestTranslationSpanChanges(t *testing.T) { inSchemaURL := joinSchemaFamilyAndVersion("https://example.com/", &tc.income) 
spans := NewExampleSpans(t, tc.income) - for i := 0; i < spans.ResourceSpans().Len(); i++ { + for i := range spans.ResourceSpans().Len() { rSpan := spans.ResourceSpans().At(i) err := tn.ApplyAllResourceChanges(rSpan, inSchemaURL) require.NoError(t, err, "Must not error when applying resource changes") - for j := 0; j < rSpan.ScopeSpans().Len(); j++ { + for j := range rSpan.ScopeSpans().Len() { span := rSpan.ScopeSpans().At(j) err = tn.ApplyScopeSpanChanges(span, inSchemaURL) require.NoError(t, err, "Must not error when applying scope span changes") @@ -269,11 +269,11 @@ func TestTranslationLogChanges(t *testing.T) { inSchemaURL := joinSchemaFamilyAndVersion("https://example.com/", &tc.income) logs := NewExampleLogs(t, tc.income) - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rLogs := logs.ResourceLogs().At(i) err = tn.ApplyAllResourceChanges(rLogs, inSchemaURL) require.NoError(t, err, "Must not error when applying resource changes") - for j := 0; j < rLogs.ScopeLogs().Len(); j++ { + for j := range rLogs.ScopeLogs().Len() { log := rLogs.ScopeLogs().At(j) err = tn.ApplyScopeLogChanges(log, inSchemaURL) require.NoError(t, err, "Must not error when applying scope log changes") @@ -341,11 +341,11 @@ func TestTranslationMetricChanges(t *testing.T) { inSchemaURL := joinSchemaFamilyAndVersion("https://example.com/", &tc.income) metrics := NewExampleMetrics(t, tc.income) - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rMetrics := metrics.ResourceMetrics().At(i) err = tn.ApplyAllResourceChanges(rMetrics, inSchemaURL) require.NoError(t, err, "Must not error when applying resource changes") - for j := 0; j < rMetrics.ScopeMetrics().Len(); j++ { + for j := range rMetrics.ScopeMetrics().Len() { metric := rMetrics.ScopeMetrics().At(j) err := tn.ApplyScopeMetricChanges(metric, inSchemaURL) require.NoError(t, err, "Must not error when applying scope metric changes") @@ -370,11 +370,11 @@ func TestTranslationEquvialance_Logs(t *testing.T) { require.NoError(t, err, "Must not error creating translator") for _, logs := range []plog.Logs{a, b} { - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rLogs := logs.ResourceLogs().At(i) err = tn.ApplyAllResourceChanges(rLogs, rLogs.SchemaUrl()) require.NoError(t, err, "Must not error when applying resource changes") - for j := 0; j < rLogs.ScopeLogs().Len(); j++ { + for j := range rLogs.ScopeLogs().Len() { log := rLogs.ScopeLogs().At(j) err = tn.ApplyScopeLogChanges(log, log.SchemaUrl()) require.NoError(t, err, "Must not error when applying scope log changes") @@ -399,11 +399,11 @@ func TestTranslationEquvialance_Metrics(t *testing.T) { require.NoError(t, err, "Must not error creating translator") for _, metrics := range []pmetric.Metrics{a, b} { - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rMetrics := metrics.ResourceMetrics().At(i) err = tn.ApplyAllResourceChanges(rMetrics, rMetrics.SchemaUrl()) require.NoError(t, err, "Must not error when applying resource changes") - for j := 0; j < rMetrics.ScopeMetrics().Len(); j++ { + for j := range rMetrics.ScopeMetrics().Len() { metric := rMetrics.ScopeMetrics().At(j) err = tn.ApplyScopeMetricChanges(metric, metric.SchemaUrl()) require.NoError(t, err, "Must not error when applying scope metric changes") @@ -428,11 +428,11 @@ func TestTranslationEquvialance_Traces(t *testing.T) { require.NoError(t, err, "Must not 
error creating translator") for _, traces := range []ptrace.Traces{a, b} { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rSpans := traces.ResourceSpans().At(i) err = tn.ApplyAllResourceChanges(rSpans, rSpans.SchemaUrl()) require.NoError(t, err, "Must not error when applying resource changes") - for j := 0; j < rSpans.ScopeSpans().Len(); j++ { + for j := range rSpans.ScopeSpans().Len() { spans := rSpans.ScopeSpans().At(j) err = tn.ApplyScopeSpanChanges(spans, spans.SchemaUrl()) require.NoError(t, err, "Must not error when applying scope span changes") @@ -450,7 +450,7 @@ func BenchmarkCreatingTranslation(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { tn, err := newTranslatorFromReader( log, "https://opentelemetry.io/schemas/1.9.0", @@ -474,16 +474,16 @@ func BenchmarkUpgradingMetrics(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() m := pmetric.NewMetrics() metrics.CopyTo(m) b.StartTimer() - for i := 0; i < m.ResourceMetrics().Len(); i++ { + for i := range m.ResourceMetrics().Len() { rMetrics := m.ResourceMetrics().At(i) err = tn.ApplyAllResourceChanges(rMetrics, rMetrics.SchemaUrl()) require.NoError(b, err, "Must not error when applying resource changes") - for j := 0; j < rMetrics.ScopeMetrics().Len(); j++ { + for j := range rMetrics.ScopeMetrics().Len() { metric := rMetrics.ScopeMetrics().At(j) err = tn.ApplyScopeMetricChanges(metric, metric.SchemaUrl()) require.NoError(b, err, "Must not error when applying scope metric changes") @@ -505,16 +505,16 @@ func BenchmarkUpgradingTraces(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() t := ptrace.NewTraces() traces.CopyTo(t) b.StartTimer() - for i := 0; i < t.ResourceSpans().Len(); i++ { + for i := range t.ResourceSpans().Len() { rSpans := t.ResourceSpans().At(i) err = tn.ApplyAllResourceChanges(rSpans, rSpans.SchemaUrl()) require.NoError(b, err, "Must not error when applying resource changes") - for j := 0; j < rSpans.ScopeSpans().Len(); j++ { + for j := range rSpans.ScopeSpans().Len() { spans := rSpans.ScopeSpans().At(j) err = tn.ApplyScopeSpanChanges(spans, spans.SchemaUrl()) require.NoError(b, err, "Must not error when applying scope span changes") @@ -536,16 +536,16 @@ func BenchmarkUpgradingLogs(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() l := plog.NewLogs() logs.CopyTo(l) b.StartTimer() - for i := 0; i < l.ResourceLogs().Len(); i++ { + for i := range l.ResourceLogs().Len() { rLogs := l.ResourceLogs().At(i) err = tn.ApplyAllResourceChanges(rLogs, rLogs.SchemaUrl()) require.NoError(b, err, "Must not error when applying resource changes") - for j := 0; j < rLogs.ScopeLogs().Len(); j++ { + for j := range rLogs.ScopeLogs().Len() { log := rLogs.ScopeLogs().At(j) err = tn.ApplyScopeLogChanges(log, log.SchemaUrl()) require.NoError(b, err, "Must not error when applying scope log changes") diff --git a/processor/schemaprocessor/internal/translation/version_test.go b/processor/schemaprocessor/internal/translation/version_test.go index 246e44c5c68cf..fd89ca2fc3327 100644 --- a/processor/schemaprocessor/internal/translation/version_test.go +++ b/processor/schemaprocessor/internal/translation/version_test.go @@ -122,7 +122,7 @@ func BenchmarkParsingVersion(b *testing.B) { b.ReportAllocs() b.StopTimer() - for i := 0; i < b.N; i++ { + for range b.N { var err error 
b.StartTimer() ver, err = NewVersion("1.16.9") diff --git a/processor/spanprocessor/span.go b/processor/spanprocessor/span.go index 0f2992dc4abec..cd146671a3fd1 100644 --- a/processor/spanprocessor/span.go +++ b/processor/spanprocessor/span.go @@ -68,15 +68,15 @@ func newSpanProcessor(config Config) (*spanProcessor, error) { func (sp *spanProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { rs := rss.At(i) ilss := rs.ScopeSpans() resource := rs.Resource() - for j := 0; j < ilss.Len(); j++ { + for j := range ilss.Len() { ils := ilss.At(j) spans := ils.Spans() scope := ils.Scope() - for k := 0; k < spans.Len(); k++ { + for k := range spans.Len() { span := spans.At(k) if sp.skipExpr != nil { skip, err := sp.skipExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource, ils, rs)) diff --git a/processor/sumologicprocessor/aggregate_attributes_processor.go b/processor/sumologicprocessor/aggregate_attributes_processor.go index 020eb425d3a12..4edd25f10a236 100644 --- a/processor/sumologicprocessor/aggregate_attributes_processor.go +++ b/processor/sumologicprocessor/aggregate_attributes_processor.go @@ -26,7 +26,7 @@ type aggregation struct { func newAggregateAttributesProcessor(config []aggregationPair) *aggregateAttributesProcessor { aggregations := []*aggregation{} - for i := 0; i < len(config); i++ { + for i := range config { pair := &aggregation{ attribute: config[i].Attribute, prefixes: config[i].Prefixes, @@ -38,16 +38,16 @@ func newAggregateAttributesProcessor(config []aggregationPair) *aggregateAttribu } func (proc *aggregateAttributesProcessor) processLogs(logs plog.Logs) error { - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { resourceLogs := logs.ResourceLogs().At(i) err := proc.processAttributes(resourceLogs.Resource().Attributes()) if err != nil { return err } - for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ { + for j := range resourceLogs.ScopeLogs().Len() { scopeLogs := resourceLogs.ScopeLogs().At(j) - for k := 0; k < scopeLogs.LogRecords().Len(); k++ { + for k := range scopeLogs.LogRecords().Len() { err := proc.processAttributes(scopeLogs.LogRecords().At(k).Attributes()) if err != nil { return err @@ -59,16 +59,16 @@ func (proc *aggregateAttributesProcessor) processLogs(logs plog.Logs) error { } func (proc *aggregateAttributesProcessor) processMetrics(metrics pmetric.Metrics) error { - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { resourceMetrics := metrics.ResourceMetrics().At(i) err := proc.processAttributes(resourceMetrics.Resource().Attributes()) if err != nil { return err } - for j := 0; j < resourceMetrics.ScopeMetrics().Len(); j++ { + for j := range resourceMetrics.ScopeMetrics().Len() { scopeMetrics := resourceMetrics.ScopeMetrics().At(j) - for k := 0; k < scopeMetrics.Metrics().Len(); k++ { + for k := range scopeMetrics.Metrics().Len() { err := processMetricLevelAttributes(proc, scopeMetrics.Metrics().At(k)) if err != nil { return err @@ -80,16 +80,16 @@ func (proc *aggregateAttributesProcessor) processMetrics(metrics pmetric.Metrics } func (proc *aggregateAttributesProcessor) processTraces(traces ptrace.Traces) error { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { resourceSpans := traces.ResourceSpans().At(i) err := 
proc.processAttributes(resourceSpans.Resource().Attributes()) if err != nil { return err } - for j := 0; j < resourceSpans.ScopeSpans().Len(); j++ { + for j := range resourceSpans.ScopeSpans().Len() { scopeSpans := resourceSpans.ScopeSpans().At(j) - for k := 0; k < scopeSpans.Spans().Len(); k++ { + for k := range scopeSpans.Spans().Len() { err := proc.processAttributes(scopeSpans.Spans().At(k).Attributes()) if err != nil { return err @@ -109,12 +109,12 @@ func (*aggregateAttributesProcessor) ConfigPropertyName() string { } func (proc *aggregateAttributesProcessor) processAttributes(attributes pcommon.Map) error { - for i := 0; i < len(proc.aggregations); i++ { + for i := range len(proc.aggregations) { curr := proc.aggregations[i] names := []string{} attrs := []pcommon.Value{} - for j := 0; j < len(curr.prefixes); j++ { + for j := range len(curr.prefixes) { prefix := curr.prefixes[j] // Create a new map. Unused keys will be added here, // so we can check them against other prefixes. @@ -153,7 +153,7 @@ func (proc *aggregateAttributesProcessor) processAttributes(attributes pcommon.M if len(names) > 0 { aggregated := attributes.PutEmptyMap(curr.attribute) - for j := 0; j < len(names); j++ { + for j := range names { attrs[j].CopyTo(aggregated.PutEmpty(names[j])) } } diff --git a/processor/sumologicprocessor/attributes.go b/processor/sumologicprocessor/attributes.go index cdaf6c8687c4b..e6851efb092fa 100644 --- a/processor/sumologicprocessor/attributes.go +++ b/processor/sumologicprocessor/attributes.go @@ -23,7 +23,7 @@ func processMetricLevelAttributes(proc attributesProcessor, metric pmetric.Metri case pmetric.MetricTypeSum: dp := metric.Sum().DataPoints() - for i := 0; i < dp.Len(); i++ { + for i := range dp.Len() { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err @@ -33,7 +33,7 @@ func processMetricLevelAttributes(proc attributesProcessor, metric pmetric.Metri case pmetric.MetricTypeGauge: dp := metric.Gauge().DataPoints() - for i := 0; i < dp.Len(); i++ { + for i := range dp.Len() { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err @@ -43,7 +43,7 @@ func processMetricLevelAttributes(proc attributesProcessor, metric pmetric.Metri case pmetric.MetricTypeHistogram: dp := metric.Histogram().DataPoints() - for i := 0; i < dp.Len(); i++ { + for i := range dp.Len() { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err @@ -53,7 +53,7 @@ func processMetricLevelAttributes(proc attributesProcessor, metric pmetric.Metri case pmetric.MetricTypeExponentialHistogram: dp := metric.ExponentialHistogram().DataPoints() - for i := 0; i < dp.Len(); i++ { + for i := range dp.Len() { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err @@ -63,7 +63,7 @@ func processMetricLevelAttributes(proc attributesProcessor, metric pmetric.Metri case pmetric.MetricTypeSummary: dp := metric.Summary().DataPoints() - for i := 0; i < dp.Len(); i++ { + for i := range dp.Len() { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err diff --git a/processor/sumologicprocessor/cloud_namespace_processor.go b/processor/sumologicprocessor/cloud_namespace_processor.go index 3db7ed9ea68ba..a45b161e1d955 100644 --- a/processor/sumologicprocessor/cloud_namespace_processor.go +++ b/processor/sumologicprocessor/cloud_namespace_processor.go @@ -30,21 +30,21 @@ func newCloudNamespaceProcessor(addCloudNamespace bool) *cloudNamespaceProcessor } func (*cloudNamespaceProcessor) processLogs(logs plog.Logs) 
error { - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { addCloudNamespaceAttribute(logs.ResourceLogs().At(i).Resource().Attributes()) } return nil } func (*cloudNamespaceProcessor) processMetrics(metrics pmetric.Metrics) error { - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { addCloudNamespaceAttribute(metrics.ResourceMetrics().At(i).Resource().Attributes()) } return nil } func (*cloudNamespaceProcessor) processTraces(traces ptrace.Traces) error { - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { addCloudNamespaceAttribute(traces.ResourceSpans().At(i).Resource().Attributes()) } return nil diff --git a/processor/sumologicprocessor/log_fields_conversion_processor.go b/processor/sumologicprocessor/log_fields_conversion_processor.go index 3d7721ccedcee..7a2066286b7a6 100644 --- a/processor/sumologicprocessor/log_fields_conversion_processor.go +++ b/processor/sumologicprocessor/log_fields_conversion_processor.go @@ -117,12 +117,12 @@ func (proc *logFieldsConversionProcessor) processLogs(logs plog.Logs) error { } rls := logs.ResourceLogs() - for i := 0; i < rls.Len(); i++ { + for i := range rls.Len() { ills := rls.At(i).ScopeLogs() - for j := 0; j < ills.Len(); j++ { + for j := range ills.Len() { logs := ills.At(j).LogRecords() - for k := 0; k < logs.Len(); k++ { + for k := range logs.Len() { proc.addAttributes(logs.At(k)) } } diff --git a/processor/sumologicprocessor/nesting_processor.go b/processor/sumologicprocessor/nesting_processor.go index 49ad38e9cc2eb..f067a6ea3a278 100644 --- a/processor/sumologicprocessor/nesting_processor.go +++ b/processor/sumologicprocessor/nesting_processor.go @@ -45,17 +45,17 @@ func (proc *NestingProcessor) processLogs(logs plog.Logs) error { return nil } - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rl := logs.ResourceLogs().At(i) if err := proc.processAttributes(rl.Resource().Attributes()); err != nil { return err } - for j := 0; j < rl.ScopeLogs().Len(); j++ { + for j := range rl.ScopeLogs().Len() { logsRecord := rl.ScopeLogs().At(j).LogRecords() - for k := 0; k < logsRecord.Len(); k++ { + for k := range logsRecord.Len() { if err := proc.processAttributes(logsRecord.At(k).Attributes()); err != nil { return err } @@ -71,17 +71,17 @@ func (proc *NestingProcessor) processMetrics(metrics pmetric.Metrics) error { return nil } - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) if err := proc.processAttributes(rm.Resource().Attributes()); err != nil { return err } - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { metricsSlice := rm.ScopeMetrics().At(j).Metrics() - for k := 0; k < metricsSlice.Len(); k++ { + for k := range metricsSlice.Len() { if err := processMetricLevelAttributes(proc, metricsSlice.At(k)); err != nil { return err } @@ -97,17 +97,17 @@ func (proc *NestingProcessor) processTraces(traces ptrace.Traces) error { return nil } - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) if err := proc.processAttributes(rs.Resource().Attributes()); err != nil { return err } - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { spans := rs.ScopeSpans().At(j).Spans() - for k := 0; k < spans.Len(); k++ { + for k 
:= range spans.Len() { if err := proc.processAttributes(spans.At(k).Attributes()); err != nil { return err } @@ -141,7 +141,7 @@ func (proc *NestingProcessor) processAttributes(attributes pcommon.Map) error { nextMap := prevValue.Map() newMap.CopyTo(nextMap) - for i := 0; i < len(keys); i++ { + for i := range keys { if prevValue.Type() != pcommon.ValueTypeMap { // If previous value was not a map, change it into a map. // The former value will be set under the key "". @@ -199,7 +199,7 @@ func (proc *NestingProcessor) processAttributes(attributes pcommon.Map) error { func (proc *NestingProcessor) shouldTranslateKey(k string) bool { if len(proc.allowlist) > 0 { isOk := false - for i := 0; i < len(proc.allowlist); i++ { + for i := range len(proc.allowlist) { if strings.HasPrefix(k, proc.allowlist[i]) { isOk = true break @@ -211,7 +211,7 @@ func (proc *NestingProcessor) shouldTranslateKey(k string) bool { } if len(proc.denylist) > 0 { - for i := 0; i < len(proc.denylist); i++ { + for i := range len(proc.denylist) { if strings.HasPrefix(k, proc.denylist[i]) { return false } diff --git a/processor/sumologicprocessor/processor_test.go b/processor/sumologicprocessor/processor_test.go index 02f31879a84f4..01433daf049dc 100644 --- a/processor/sumologicprocessor/processor_test.go +++ b/processor/sumologicprocessor/processor_test.go @@ -79,7 +79,7 @@ func TestAddCloudNamespaceForLogs(t *testing.T) { return inputLogs }, test: func(outputLogs plog.Logs) { - for i := 0; i < outputLogs.ResourceLogs().Len(); i++ { + for i := range outputLogs.ResourceLogs().Len() { _, found := outputLogs.ResourceLogs().At(i).Resource().Attributes().Get("cloud.namespace") assert.False(t, found) } @@ -205,7 +205,7 @@ func TestAddCloudNamespaceForMetrics(t *testing.T) { return inputMetrics }, test: func(outputMetrics pmetric.Metrics) { - for i := 0; i < outputMetrics.ResourceMetrics().Len(); i++ { + for i := range outputMetrics.ResourceMetrics().Len() { _, found := outputMetrics.ResourceMetrics().At(i).Resource().Attributes().Get("cloud.namespace") assert.False(t, found) } @@ -331,7 +331,7 @@ func TestAddCloudNamespaceForTraces(t *testing.T) { return inputTraces }, test: func(outputTraces ptrace.Traces) { - for i := 0; i < outputTraces.ResourceSpans().Len(); i++ { + for i := range outputTraces.ResourceSpans().Len() { _, found := outputTraces.ResourceSpans().At(i).Resource().Attributes().Get("cloud.namespace") assert.False(t, found) } diff --git a/processor/sumologicprocessor/translate_attributes_processor.go b/processor/sumologicprocessor/translate_attributes_processor.go index e157458deb334..5aae88310d067 100644 --- a/processor/sumologicprocessor/translate_attributes_processor.go +++ b/processor/sumologicprocessor/translate_attributes_processor.go @@ -51,7 +51,7 @@ func (proc *translateAttributesProcessor) processLogs(logs plog.Logs) error { return nil } - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { translateAttributes(logs.ResourceLogs().At(i).Resource().Attributes()) } @@ -63,7 +63,7 @@ func (proc *translateAttributesProcessor) processMetrics(metrics pmetric.Metrics return nil } - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { translateAttributes(metrics.ResourceMetrics().At(i).Resource().Attributes()) } diff --git a/processor/sumologicprocessor/translate_attributes_processor_test.go b/processor/sumologicprocessor/translate_attributes_processor_test.go index e7535e9dac00c..a298290a1b88a 100644 --- 
a/processor/sumologicprocessor/translate_attributes_processor_test.go +++ b/processor/sumologicprocessor/translate_attributes_processor_test.go @@ -137,7 +137,7 @@ var ( func BenchmarkTranslateAttributes(b *testing.B) { err := attributes.FromRaw(benchPdataAttributes) require.NoError(b, err) - for i := 0; i < b.N; i++ { + for range b.N { translateAttributes(attributes) } } diff --git a/processor/sumologicprocessor/translate_docker_metrics_processor.go b/processor/sumologicprocessor/translate_docker_metrics_processor.go index 2e8ee41ee29d0..efb463f815d09 100644 --- a/processor/sumologicprocessor/translate_docker_metrics_processor.go +++ b/processor/sumologicprocessor/translate_docker_metrics_processor.go @@ -90,14 +90,14 @@ func (proc *translateDockerMetricsProcessor) processMetrics(metrics pmetric.Metr return nil } - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) translateDockerResourceAttributes(rm.Resource().Attributes()) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { metricsSlice := rm.ScopeMetrics().At(j).Metrics() - for k := 0; k < metricsSlice.Len(); k++ { + for k := range metricsSlice.Len() { translateDockerMetric(metricsSlice.At(k)) } } diff --git a/processor/sumologicprocessor/translate_telegraf_metrics_processor.go b/processor/sumologicprocessor/translate_telegraf_metrics_processor.go index 2fbb200bae425..613de03ba154b 100644 --- a/processor/sumologicprocessor/translate_telegraf_metrics_processor.go +++ b/processor/sumologicprocessor/translate_telegraf_metrics_processor.go @@ -88,13 +88,13 @@ func (proc *translateTelegrafMetricsProcessor) processMetrics(metrics pmetric.Me return nil } - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { metricsSlice := rm.ScopeMetrics().At(j).Metrics() - for k := 0; k < metricsSlice.Len(); k++ { + for k := range metricsSlice.Len() { translateTelegrafMetric(metricsSlice.At(k)) } } diff --git a/processor/tailsamplingprocessor/internal/idbatcher/id_batcher.go b/processor/tailsamplingprocessor/internal/idbatcher/id_batcher.go index a9e22b7815af0..a835a48ec3a0a 100644 --- a/processor/tailsamplingprocessor/internal/idbatcher/id_batcher.go +++ b/processor/tailsamplingprocessor/internal/idbatcher/id_batcher.go @@ -77,7 +77,7 @@ func New(numBatches, newBatchesInitialCapacity, batchChannelSize uint64) (Batche // CloseCurrentAndTakeFirstBatch on a timer and want to delay the processing of the first // batch with actual data. This way there is no need for accounting on the client side and // a single timer can be started immediately. 
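The idbatcher hunk below drops a typed uint64 index: since Go 1.22, a range clause accepts any integer type, so the count can be ranged over directly once the index is unused. A minimal sketch of the same pattern, using hypothetical names (fill, a chan []byte) rather than the batcher's real types:

package main

import "fmt"

// fill pre-loads a channel with nil batches, numBatches times.
func fill(numBatches uint64) chan []byte {
	batches := make(chan []byte, numBatches)
	for range numBatches { // replaces: for i := uint64(0); i < numBatches; i++
		batches <- nil
	}
	return batches
}

func main() {
	fmt.Println(len(fill(4))) // 4 queued batches
}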
- for i := uint64(0); i < numBatches; i++ { + for range numBatches { batches <- nil } diff --git a/processor/tailsamplingprocessor/internal/idbatcher/id_batcher_test.go b/processor/tailsamplingprocessor/internal/idbatcher/id_batcher_test.go index 95d12ef3c4c40..c5f3a02b5e0cd 100644 --- a/processor/tailsamplingprocessor/internal/idbatcher/id_batcher_test.go +++ b/processor/tailsamplingprocessor/internal/idbatcher/id_batcher_test.go @@ -105,7 +105,7 @@ func concurrencyTest(t *testing.T, numBatches, newBatchesInitialCapacity, batchC // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/9126 concurrencyLimiter := make(chan struct{}, 128) defer close(concurrencyLimiter) - for i := 0; i < len(ids); i++ { + for i := range ids { wg.Add(1) concurrencyLimiter <- struct{}{} go func(id pcommon.TraceID) { @@ -136,14 +136,14 @@ func concurrencyTest(t *testing.T, numBatches, newBatchesInitialCapacity, batchC idSeen[id] = true } - for i := 0; i < len(ids); i++ { + for i := range ids { require.True(t, idSeen[ids[i]], "want id %v but id was not seen", ids[i]) } } func generateSequentialIDs(numIDs uint64) []pcommon.TraceID { ids := make([]pcommon.TraceID, numIDs) - for i := uint64(0); i < numIDs; i++ { + for i := range numIDs { traceID := [16]byte{} binary.BigEndian.PutUint64(traceID[:8], 0) binary.BigEndian.PutUint64(traceID[8:], i) diff --git a/processor/tailsamplingprocessor/internal/sampling/composite.go b/processor/tailsamplingprocessor/internal/sampling/composite.go index a88d32e3df31d..978bde332fd44 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite.go @@ -60,7 +60,7 @@ func NewComposite( ) PolicyEvaluator { var subpolicies []*subpolicy - for i := 0; i < len(subPolicyParams); i++ { + for i := range subPolicyParams { sub := &subpolicy{} sub.evaluator = subPolicyParams[i].Evaluator sub.allocatedSPS = subPolicyParams[i].MaxSpansPerSecond diff --git a/processor/tailsamplingprocessor/internal/sampling/composite_test.go b/processor/tailsamplingprocessor/internal/sampling/composite_test.go index d458643004946..a2bb55967c3e8 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite_test.go @@ -220,7 +220,7 @@ func TestCompositeEvaluatorThrottling(t *testing.T) { trace := createTrace() // First totalSPS traces should be 100% Sampled - for i := 0; i < totalSPS; i++ { + for range totalSPS { decision, err := c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) @@ -229,7 +229,7 @@ func TestCompositeEvaluatorThrottling(t *testing.T) { } // Now we hit the rate limit, so subsequent evaluations should result in 100% NotSampled - for i := 0; i < totalSPS; i++ { + for range totalSPS { decision, err := c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) @@ -241,7 +241,7 @@ func TestCompositeEvaluatorThrottling(t *testing.T) { timeProvider.second++ // Subsequent sampling should be Sampled again because it is a new second. 
- for i := 0; i < totalSPS; i++ { + for range totalSPS { decision, err := c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) @@ -264,7 +264,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { // We have 2 subpolicies, so each should initially get half the bandwidth // First totalSPS/2 should be Sampled until we hit the rate limit - for i := 0; i < totalSPS/2; i++ { + for range totalSPS / 2 { decision, err := c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) @@ -273,7 +273,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { } // Now we hit the rate limit for second subpolicy, so subsequent evaluations should result in NotSampled - for i := 0; i < totalSPS/2; i++ { + for range totalSPS / 2 { decision, err := c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) @@ -285,7 +285,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { timeProvider.second++ // It is a new second, so we should start sampling again. - for i := 0; i < totalSPS/2; i++ { + for range totalSPS / 2 { decision, err := c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) @@ -294,7 +294,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { } // Now let's hit the hard limit and exceed the total by a factor of 2 - for i := 0; i < 2*totalSPS; i++ { + for range 2 * totalSPS { decision, err := c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) @@ -306,7 +306,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { timeProvider.second++ // It is a new second, so we should start sampling again. 
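The throttling assertions above and below loop over computed counts such as totalSPS / 2 and 2 * totalSPS; a range-over-int expression is evaluated once before the first iteration, so behavior matches the old three-clause loops. A tiny sketch with a hypothetical budget:

package main

import "fmt"

const totalSPS = 100 // hypothetical spans-per-second budget

func main() {
	sampled := 0
	for range totalSPS / 2 { // count computed once, up front
		sampled++
	}
	fmt.Println(sampled) // 50
}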
- for i := 0; i < totalSPS/2; i++ { + for range totalSPS / 2 { decision, err := c.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate composite policy: %v", err) diff --git a/processor/tailsamplingprocessor/internal/sampling/ottl.go b/processor/tailsamplingprocessor/internal/sampling/ottl.go index 7d5f520ece503..c502579d668ae 100644 --- a/processor/tailsamplingprocessor/internal/sampling/ottl.go +++ b/processor/tailsamplingprocessor/internal/sampling/ottl.go @@ -65,13 +65,13 @@ func (ocf *ottlConditionFilter) Evaluate(ctx context.Context, traceID pcommon.Tr defer trace.Unlock() batches := trace.ReceivedBatches - for i := 0; i < batches.ResourceSpans().Len(); i++ { + for i := range batches.ResourceSpans().Len() { rs := batches.ResourceSpans().At(i) resource := rs.Resource() - for j := 0; j < rs.ScopeSpans().Len(); j++ { + for j := range rs.ScopeSpans().Len() { ss := rs.ScopeSpans().At(j) scope := ss.Scope() - for k := 0; k < ss.Spans().Len(); k++ { + for k := range ss.Spans().Len() { span := ss.Spans().At(k) var ( @@ -99,7 +99,7 @@ func (ocf *ottlConditionFilter) Evaluate(ctx context.Context, traceID pcommon.Tr // Span event evaluation if ocf.sampleSpanEventExpr != nil { spanEvents := span.Events() - for l := 0; l < spanEvents.Len(); l++ { + for l := range spanEvents.Len() { ok, err = ocf.sampleSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvents.At(l), span, scope, resource, ss, rs)) if err != nil { return Error, err diff --git a/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go b/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go index 4a8d35ae0da49..763df375d3ba6 100644 --- a/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go @@ -96,7 +96,7 @@ func genRandomTraceIDs(num int) (ids []pcommon.TraceID) { // be unpredictable. 
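genRandomTraceIDs never reads its loop index, so its counted loop collapses to for range num, as in the hunk that follows. A self-contained sketch of the same fixed-seed pattern with math/rand/v2 (genIDs is a hypothetical name, not the test helper itself):

package main

import (
	"encoding/binary"
	"fmt"
	"math/rand/v2"
)

func genIDs(num int) [][16]byte {
	r := rand.New(rand.NewPCG(123, 456)) // fixed seed for reproducible tests
	ids := make([][16]byte, 0, num)
	for range num { // index unused: replaces for i := 0; i < num; i++
		var id [16]byte
		binary.BigEndian.PutUint64(id[:8], r.Uint64())
		binary.BigEndian.PutUint64(id[8:], r.Uint64())
		ids = append(ids, id)
	}
	return ids
}

func main() {
	fmt.Println(len(genIDs(3))) // 3
}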
diff --git a/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go b/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go
index 4a8d35ae0da49..763df375d3ba6 100644
--- a/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go
+++ b/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go
@@ -96,7 +96,7 @@ func genRandomTraceIDs(num int) (ids []pcommon.TraceID) {
     // be unpredictable.
     r := rand.New(rand.NewPCG(123, 456))
     ids = make([]pcommon.TraceID, 0, num)
-    for i := 0; i < num; i++ {
+    for range num {
         traceID := [16]byte{}
         binary.BigEndian.PutUint64(traceID[:8], r.Uint64())
         binary.BigEndian.PutUint64(traceID[8:], r.Uint64())
diff --git a/processor/tailsamplingprocessor/internal/sampling/span_count_sampler_test.go b/processor/tailsamplingprocessor/internal/sampling/span_count_sampler_test.go
index 9560333226930..f1362d83d7ea3 100644
--- a/processor/tailsamplingprocessor/internal/sampling/span_count_sampler_test.go
+++ b/processor/tailsamplingprocessor/internal/sampling/span_count_sampler_test.go
@@ -244,7 +244,7 @@ func newTraceWithMultipleSpans(numberSpans []int32) *TraceData {
         rs := traces.ResourceSpans().AppendEmpty()
         ils := rs.ScopeSpans().AppendEmpty()
-        for r := 0; r < int(numberSpans[i]); r++ {
+        for range numberSpans[i] {
             span := ils.Spans().AppendEmpty()
             span.SetTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})
             span.SetSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})
diff --git a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go
index e9ee3da867737..da7666fd5df5a 100644
--- a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go
+++ b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go
@@ -213,7 +213,7 @@ func BenchmarkStringTagFilterEvaluatePlainText(b *testing.B) {
     trace := newTraceStringAttrs(map[string]any{"example": "value"}, "", "")
     filter := NewStringAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", []string{"value"}, false, 0, false)
     b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for range b.N {
         _, err := filter.Evaluate(context.Background(), pcommon.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), trace)
         assert.NoError(b, err)
     }
@@ -223,7 +223,7 @@ func BenchmarkStringTagFilterEvaluateRegex(b *testing.B) {
     trace := newTraceStringAttrs(map[string]any{"example": "grpc.health.v1.HealthCheck"}, "", "")
     filter := NewStringAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", []string{"v[0-9]+.HealthCheck$"}, true, 0, false)
     b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for range b.N {
         _, err := filter.Evaluate(context.Background(), pcommon.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), trace)
         assert.NoError(b, err)
     }
diff --git a/processor/tailsamplingprocessor/internal/sampling/util.go b/processor/tailsamplingprocessor/internal/sampling/util.go
index d8aedc686d3be..7cccda1de565a 100644
--- a/processor/tailsamplingprocessor/internal/sampling/util.go
+++ b/processor/tailsamplingprocessor/internal/sampling/util.go
@@ -15,7 +15,7 @@ func hasResourceOrSpanWithCondition(
     shouldSampleResource func(resource pcommon.Resource) bool,
     shouldSampleSpan func(span ptrace.Span) bool,
 ) Decision {
-    for i := 0; i < td.ResourceSpans().Len(); i++ {
+    for i := range td.ResourceSpans().Len() {
         rs := td.ResourceSpans().At(i)
         resource := rs.Resource()
@@ -37,7 +37,7 @@ func invertHasResourceOrSpanWithCondition(
     shouldSampleResource func(resource pcommon.Resource) bool,
     shouldSampleSpan func(span ptrace.Span) bool,
 ) Decision {
-    for i := 0; i < td.ResourceSpans().Len(); i++ {
+    for i := range td.ResourceSpans().Len() {
         rs := td.ResourceSpans().At(i)
         resource := rs.Resource()
@@ -54,7 +54,7 @@ func invertHasResourceOrSpanWithCondition(
 // hasSpanWithCondition iterates through all the instrumentation library spans until any callback returns true.
 func hasSpanWithCondition(td ptrace.Traces, shouldSample func(span ptrace.Span) bool) Decision {
-    for i := 0; i < td.ResourceSpans().Len(); i++ {
+    for i := range td.ResourceSpans().Len() {
         rs := td.ResourceSpans().At(i)
         if hasInstrumentationLibrarySpanWithCondition(rs.ScopeSpans(), shouldSample) {
@@ -65,10 +65,10 @@ func hasSpanWithCondition(td ptrace.Traces, shouldSample func(span ptrace.Span)
 }
 func hasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, check func(span ptrace.Span) bool) bool {
-    for i := 0; i < ilss.Len(); i++ {
+    for i := range ilss.Len() {
         ils := ilss.At(i)
-        for j := 0; j < ils.Spans().Len(); j++ {
+        for j := range ils.Spans().Len() {
             span := ils.Spans().At(j)
             if check(span) {
@@ -80,10 +80,10 @@ func hasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, che
 }
 func invertHasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, check func(span ptrace.Span) bool) bool {
-    for i := 0; i < ilss.Len(); i++ {
+    for i := range ilss.Len() {
         ils := ilss.At(i)
-        for j := 0; j < ils.Spans().Len(); j++ {
+        for j := range ils.Spans().Len() {
             span := ils.Spans().At(j)
             if !check(span) {
@@ -99,9 +99,9 @@ func SetAttrOnScopeSpans(data *TraceData, attrName string, attrKey string) {
     defer data.Mutex.Unlock()
     rs := data.ReceivedBatches.ResourceSpans()
-    for i := 0; i < rs.Len(); i++ {
+    for i := range rs.Len() {
         rss := rs.At(i)
-        for j := 0; j < rss.ScopeSpans().Len(); j++ {
+        for j := range rss.ScopeSpans().Len() {
             ss := rss.ScopeSpans().At(j)
             ss.Scope().Attributes().PutStr(attrName, attrKey)
         }
diff --git a/processor/tailsamplingprocessor/internal/sampling/util_test.go b/processor/tailsamplingprocessor/internal/sampling/util_test.go
index 270336fad5c15..3581e14c5c744 100644
--- a/processor/tailsamplingprocessor/internal/sampling/util_test.go
+++ b/processor/tailsamplingprocessor/internal/sampling/util_test.go
@@ -60,10 +60,10 @@ func TestSetAttrOnScopeSpans_Many(t *testing.T) {
 }
 func BenchmarkSetAttrOnScopeSpans(b *testing.B) {
-    for n := 0; n < b.N; n++ {
+    for range b.N {
         traces := ptrace.NewTraces()
-        for i := 0; i < 5; i++ {
+        for range 5 {
             rs := traces.ResourceSpans().AppendEmpty()
             ss1 := rs.ScopeSpans().AppendEmpty()
             ss1.Spans().AppendEmpty()
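The test hunks above drop the loop variable entirely where it is unused: `for range b.N` replaces `for n := 0; n < b.N; n++`. A minimal sketch of that form (hypothetical benchmark, assuming only the standard testing package):

    package demo

    import "testing"

    // The iteration count is identical; the unused index simply disappears.
    func BenchmarkNoop(b *testing.B) {
        for range b.N {
            _ = len("work") // stand-in for the code under test
        }
    }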
diff --git a/processor/tailsamplingprocessor/processor.go b/processor/tailsamplingprocessor/processor.go
index d0f26c18d8090..5a4a636c50ced 100644
--- a/processor/tailsamplingprocessor/processor.go
+++ b/processor/tailsamplingprocessor/processor.go
@@ -453,7 +453,7 @@ func (tsp *tailSamplingSpanProcessor) makeDecision(id pcommon.TraceID, trace *sa
 // ConsumeTraces is required by the processor.Traces interface.
 func (tsp *tailSamplingSpanProcessor) ConsumeTraces(_ context.Context, td ptrace.Traces) error {
     resourceSpans := td.ResourceSpans()
-    for i := 0; i < resourceSpans.Len(); i++ {
+    for i := range resourceSpans.Len() {
         tsp.processTraces(resourceSpans.At(i))
     }
     return nil
@@ -462,12 +462,12 @@ func (tsp *tailSamplingSpanProcessor) ConsumeTraces(_ context.Context, td ptrace
 func (tsp *tailSamplingSpanProcessor) groupSpansByTraceKey(resourceSpans ptrace.ResourceSpans) map[pcommon.TraceID][]spanAndScope {
     idToSpans := make(map[pcommon.TraceID][]spanAndScope)
     ilss := resourceSpans.ScopeSpans()
-    for j := 0; j < ilss.Len(); j++ {
+    for j := range ilss.Len() {
         scope := ilss.At(j)
         spans := scope.Spans()
         is := scope.Scope()
         spansLen := spans.Len()
-        for k := 0; k < spansLen; k++ {
+        for k := range spansLen {
             span := spans.At(k)
             key := span.TraceID()
             idToSpans[key] = append(idToSpans[key], spanAndScope{
diff --git a/processor/tailsamplingprocessor/processor_benchmarks_test.go b/processor/tailsamplingprocessor/processor_benchmarks_test.go
index dce596c06f55f..de3a28c547f33 100644
--- a/processor/tailsamplingprocessor/processor_benchmarks_test.go
+++ b/processor/tailsamplingprocessor/processor_benchmarks_test.go
@@ -34,7 +34,7 @@ func BenchmarkSampling(b *testing.B) {
     metrics := &policyMetrics{}
     sampleBatches := make([]*sampling.TraceData, 0, len(batches))
-    for i := 0; i < len(batches); i++ {
+    for range batches {
         sampleBatches = append(sampleBatches, &sampling.TraceData{
             ArrivalTime: time.Now(),
             // SpanCount: spanCount,
@@ -42,7 +42,7 @@
     }
-    for i := 0; i < b.N; i++ {
+    for range b.N {
         for i, id := range traceIDs {
             _ = tsp.makeDecision(id, sampleBatches[i], metrics)
         }
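BenchmarkSampling above goes one step further: `for i := 0; i < len(batches); i++` becomes `for range batches`, ranging over the slice itself since neither index nor element is needed. A small sketch (illustrative values):

    package main

    import "fmt"

    func main() {
        batches := []string{"a", "b", "c"}
        count := 0
        // The body runs once per element; no index variable is declared.
        for range batches {
            count++
        }
        fmt.Println(count) // 3
    }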
diff --git a/processor/tailsamplingprocessor/processor_test.go b/processor/tailsamplingprocessor/processor_test.go
index 5ecfb60fbb5ce..82a4d1162b531 100644
--- a/processor/tailsamplingprocessor/processor_test.go
+++ b/processor/tailsamplingprocessor/processor_test.go
@@ -304,12 +304,12 @@ func TestConcurrentArrivalAndEvaluation(t *testing.T) {
     for _, batch := range batches {
         wg.Add(1)
         go func(td ptrace.Traces) {
-            for i := 0; i < 10; i++ {
+            for range 10 {
                 assert.NoError(t, tsp.ConsumeTraces(context.Background(), td))
             }
             <-evalStarted
             close(continueEvaluation)
-            for i := 0; i < 10; i++ {
+            for range 10 {
                 assert.NoError(t, tsp.ConsumeTraces(context.Background(), td))
             }
             wg.Done()
@@ -347,7 +347,7 @@ func TestSequentialTraceMapSize(t *testing.T) {
     // On sequential insertion it is possible to know exactly which traces should be still on the map.
     tsp := sp.(*tailSamplingSpanProcessor)
-    for i := 0; i < len(traceIDs)-int(cfg.NumTraces); i++ {
+    for i := range len(traceIDs) - int(cfg.NumTraces) {
         _, ok := tsp.idToTrace.Load(traceIDs[i])
         require.False(t, ok, "Found unexpected traceId[%d] still on map (id: %v)", i, traceIDs[i])
     }
@@ -671,13 +671,13 @@ func TestDecisionPolicyMetrics(t *testing.T) {
 func collectSpanIDs(trace ptrace.Traces) []pcommon.SpanID {
     var spanIDs []pcommon.SpanID
-    for i := 0; i < trace.ResourceSpans().Len(); i++ {
+    for i := range trace.ResourceSpans().Len() {
         ilss := trace.ResourceSpans().At(i).ScopeSpans()
-        for j := 0; j < ilss.Len(); j++ {
+        for j := range ilss.Len() {
             ils := ilss.At(j)
-            for k := 0; k < ils.Spans().Len(); k++ {
+            for k := range ils.Spans().Len() {
                 span := ils.Spans().At(k)
                 spanIDs = append(spanIDs, span.SpanID())
             }
@@ -702,7 +702,7 @@ func generateIDsAndBatches(numIDs int) ([]pcommon.TraceID, []ptrace.Traces) {
     traceIDs := make([]pcommon.TraceID, numIDs)
     spanID := 0
     var tds []ptrace.Traces
-    for i := 0; i < numIDs; i++ {
+    for i := range numIDs {
         traceIDs[i] = uInt64ToTraceID(uint64(i))
         // Send each span in a separate batch
         for j := 0; j <= i; j++ {
diff --git a/processor/transformprocessor/internal/common/logs.go b/processor/transformprocessor/internal/common/logs.go
index 2a3b883529926..8abd2ff79b83b 100644
--- a/processor/transformprocessor/internal/common/logs.go
+++ b/processor/transformprocessor/internal/common/logs.go
@@ -31,12 +31,12 @@ func (l logStatements) Context() ContextID {
 }
 func (l logStatements) ConsumeLogs(ctx context.Context, ld plog.Logs, cache *pcommon.Map) error {
-    for i := 0; i < ld.ResourceLogs().Len(); i++ {
+    for i := range ld.ResourceLogs().Len() {
         rlogs := ld.ResourceLogs().At(i)
-        for j := 0; j < rlogs.ScopeLogs().Len(); j++ {
+        for j := range rlogs.ScopeLogs().Len() {
             slogs := rlogs.ScopeLogs().At(j)
             logs := slogs.LogRecords()
-            for k := 0; k < logs.Len(); k++ {
+            for k := range logs.Len() {
                 tCtx := ottllog.NewTransformContext(logs.At(k), slogs.Scope(), rlogs.Resource(), slogs, rlogs, ottllog.WithCache(cache))
                 condition, err := l.BoolExpr.Eval(ctx, tCtx)
                 if err != nil {
diff --git a/processor/transformprocessor/internal/common/metrics.go b/processor/transformprocessor/internal/common/metrics.go
index 8919df68d2a3a..2d93bd9111b6e 100644
--- a/processor/transformprocessor/internal/common/metrics.go
+++ b/processor/transformprocessor/internal/common/metrics.go
@@ -32,12 +32,12 @@ func (m metricStatements) Context() ContextID {
 }
 func (m metricStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics, cache *pcommon.Map) error {
-    for i := 0; i < md.ResourceMetrics().Len(); i++ {
+    for i := range md.ResourceMetrics().Len() {
         rmetrics := md.ResourceMetrics().At(i)
-        for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ {
+        for j := range rmetrics.ScopeMetrics().Len() {
             smetrics := rmetrics.ScopeMetrics().At(j)
             metrics := smetrics.Metrics()
-            for k := 0; k < metrics.Len(); k++ {
+            for k := range metrics.Len() {
                 tCtx := ottlmetric.NewTransformContext(metrics.At(k), smetrics.Metrics(), smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics, ottlmetric.WithCache(cache))
                 condition, err := m.BoolExpr.Eval(ctx, tCtx)
                 if err != nil {
@@ -65,12 +65,12 @@ func (d dataPointStatements) Context() ContextID {
 }
 func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics, cache *pcommon.Map) error {
-    for i := 0; i < md.ResourceMetrics().Len(); i++ {
+    for i := range md.ResourceMetrics().Len() {
         rmetrics := md.ResourceMetrics().At(i)
-        for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ {
+        for j := range rmetrics.ScopeMetrics().Len() {
             smetrics := rmetrics.ScopeMetrics().At(j)
             metrics := smetrics.Metrics()
-            for k := 0; k < metrics.Len(); k++ {
+            for k := range metrics.Len() {
                 metric := metrics.At(k)
                 transformContextOptions := []ottldatapoint.TransformContextOption{ottldatapoint.WithCache(cache)}
                 var err error
@@ -97,7 +97,7 @@ func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metr
 }
 func (d dataPointStatements) handleNumberDataPoints(ctx context.Context, dps pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options []ottldatapoint.TransformContextOption) error {
-    for i := 0; i < dps.Len(); i++ {
+    for i := range dps.Len() {
         tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics, options...)
         condition, err := d.BoolExpr.Eval(ctx, tCtx)
         if err != nil {
@@ -114,7 +114,7 @@ func (d dataPointStatements) handleNumberDataPoints(ctx context.Context, dps pme
 }
 func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options []ottldatapoint.TransformContextOption) error {
-    for i := 0; i < dps.Len(); i++ {
+    for i := range dps.Len() {
         tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics, options...)
         condition, err := d.BoolExpr.Eval(ctx, tCtx)
         if err != nil {
@@ -131,7 +131,7 @@ func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps
 }
 func (d dataPointStatements) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options []ottldatapoint.TransformContextOption) error {
-    for i := 0; i < dps.Len(); i++ {
+    for i := range dps.Len() {
         tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics, options...)
         condition, err := d.BoolExpr.Eval(ctx, tCtx)
         if err != nil {
@@ -148,7 +148,7 @@ func (d dataPointStatements) handleExponentialHistogramDataPoints(ctx context.Co
 }
 func (d dataPointStatements) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options []ottldatapoint.TransformContextOption) error {
-    for i := 0; i < dps.Len(); i++ {
+    for i := range dps.Len() {
         tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics, options...)
         condition, err := d.BoolExpr.Eval(ctx, tCtx)
         if err != nil {
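In hunks like `for i := range dps.Len()` above, the range expression is evaluated once, before the first iteration, whereas a classic loop re-evaluates its condition on every pass. The rewrites are therefore equivalent only because these bounds are loop-invariant. A sketch of where the two forms would diverge (illustrative only):

    package main

    import "fmt"

    func main() {
        xs := []int{10, 20, 30}
        // len(xs) is captured once as 3, so exactly three appends happen.
        // The classic `for i := 0; i < len(xs); i++` with this body would
        // re-check the growing length and never terminate.
        for i := range len(xs) {
            xs = append(xs, i)
        }
        fmt.Println(len(xs)) // 6
    }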
diff --git a/processor/transformprocessor/internal/common/processor.go b/processor/transformprocessor/internal/common/processor.go
index 10d45afb197ef..8cab223d593cd 100644
--- a/processor/transformprocessor/internal/common/processor.go
+++ b/processor/transformprocessor/internal/common/processor.go
@@ -31,7 +31,7 @@ func (r resourceStatements) Context() ContextID {
 }
 func (r resourceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error {
-    for i := 0; i < td.ResourceSpans().Len(); i++ {
+    for i := range td.ResourceSpans().Len() {
         rspans := td.ResourceSpans().At(i)
         tCtx := ottlresource.NewTransformContext(rspans.Resource(), rspans, ottlresource.WithCache(cache))
         condition, err := r.BoolExpr.Eval(ctx, tCtx)
@@ -49,7 +49,7 @@ func (r resourceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces,
 }
 func (r resourceStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics, cache *pcommon.Map) error {
-    for i := 0; i < md.ResourceMetrics().Len(); i++ {
+    for i := range md.ResourceMetrics().Len() {
         rmetrics := md.ResourceMetrics().At(i)
         tCtx := ottlresource.NewTransformContext(rmetrics.Resource(), rmetrics, ottlresource.WithCache(cache))
         condition, err := r.BoolExpr.Eval(ctx, tCtx)
@@ -67,7 +67,7 @@ func (r resourceStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metri
 }
 func (r resourceStatements) ConsumeLogs(ctx context.Context, ld plog.Logs, cache *pcommon.Map) error {
-    for i := 0; i < ld.ResourceLogs().Len(); i++ {
+    for i := range ld.ResourceLogs().Len() {
         rlogs := ld.ResourceLogs().At(i)
         tCtx := ottlresource.NewTransformContext(rlogs.Resource(), rlogs, ottlresource.WithCache(cache))
         condition, err := r.BoolExpr.Eval(ctx, tCtx)
@@ -96,9 +96,9 @@ func (s scopeStatements) Context() ContextID {
 }
 func (s scopeStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error {
-    for i := 0; i < td.ResourceSpans().Len(); i++ {
+    for i := range td.ResourceSpans().Len() {
         rspans := td.ResourceSpans().At(i)
-        for j := 0; j < rspans.ScopeSpans().Len(); j++ {
+        for j := range rspans.ScopeSpans().Len() {
             sspans := rspans.ScopeSpans().At(j)
             tCtx := ottlscope.NewTransformContext(sspans.Scope(), rspans.Resource(), sspans, ottlscope.WithCache(cache))
             condition, err := s.BoolExpr.Eval(ctx, tCtx)
@@ -117,9 +117,9 @@ func (s scopeStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces, ca
 }
 func (s scopeStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics, cache *pcommon.Map) error {
-    for i := 0; i < md.ResourceMetrics().Len(); i++ {
+    for i := range md.ResourceMetrics().Len() {
         rmetrics := md.ResourceMetrics().At(i)
-        for j := 0; j < rmetrics.ScopeMetrics().Len(); j++ {
+        for j := range rmetrics.ScopeMetrics().Len() {
             smetrics := rmetrics.ScopeMetrics().At(j)
             tCtx := ottlscope.NewTransformContext(smetrics.Scope(), rmetrics.Resource(), smetrics, ottlscope.WithCache(cache))
             condition, err := s.BoolExpr.Eval(ctx, tCtx)
@@ -138,9 +138,9 @@ func (s scopeStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metrics,
 }
 func (s scopeStatements) ConsumeLogs(ctx context.Context, ld plog.Logs, cache *pcommon.Map) error {
-    for i := 0; i < ld.ResourceLogs().Len(); i++ {
+    for i := range ld.ResourceLogs().Len() {
         rlogs := ld.ResourceLogs().At(i)
-        for j := 0; j < rlogs.ScopeLogs().Len(); j++ {
+        for j := range rlogs.ScopeLogs().Len() {
             slogs := rlogs.ScopeLogs().At(j)
             tCtx := ottlscope.NewTransformContext(slogs.Scope(), rlogs.Resource(), slogs, ottlscope.WithCache(cache))
             condition, err := s.BoolExpr.Eval(ctx, tCtx)
diff --git a/processor/transformprocessor/internal/common/traces.go b/processor/transformprocessor/internal/common/traces.go
index 7719e563e5f67..ff7806608c4da 100644
--- a/processor/transformprocessor/internal/common/traces.go
+++ b/processor/transformprocessor/internal/common/traces.go
@@ -32,12 +32,12 @@ func (t traceStatements) Context() ContextID {
 }
 func (t traceStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error {
-    for i := 0; i < td.ResourceSpans().Len(); i++ {
+    for i := range td.ResourceSpans().Len() {
         rspans := td.ResourceSpans().At(i)
-        for j := 0; j < rspans.ScopeSpans().Len(); j++ {
+        for j := range rspans.ScopeSpans().Len() {
             sspans := rspans.ScopeSpans().At(j)
             spans := sspans.Spans()
-            for k := 0; k < spans.Len(); k++ {
+            for k := range spans.Len() {
                 tCtx := ottlspan.NewTransformContext(spans.At(k), sspans.Scope(), rspans.Resource(), sspans, rspans, ottlspan.WithCache(cache))
                 condition, err := t.BoolExpr.Eval(ctx, tCtx)
                 if err != nil {
@@ -65,15 +65,15 @@ func (s spanEventStatements) Context() ContextID {
 }
 func (s spanEventStatements) ConsumeTraces(ctx context.Context, td ptrace.Traces, cache *pcommon.Map) error {
-    for i := 0; i < td.ResourceSpans().Len(); i++ {
+    for i := range td.ResourceSpans().Len() {
         rspans := td.ResourceSpans().At(i)
-        for j := 0; j < rspans.ScopeSpans().Len(); j++ {
+        for j := range rspans.ScopeSpans().Len() {
             sspans := rspans.ScopeSpans().At(j)
             spans := sspans.Spans()
-            for k := 0; k < spans.Len(); k++ {
+            for k := range spans.Len() {
                 span := spans.At(k)
                 spanEvents := span.Events()
-                for n := 0; n < spanEvents.Len(); n++ {
+                for n := range spanEvents.Len() {
                     tCtx := ottlspanevent.NewTransformContext(spanEvents.At(n), span, sspans.Scope(), rspans.Resource(), sspans, rspans, ottlspanevent.WithCache(cache))
                     condition, err := s.BoolExpr.Eval(ctx, tCtx)
                     if err != nil {
diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go
index 167d5293461d4..cf14b2f7f6e09 100644
--- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go
+++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go
@@ -81,7 +81,7 @@ func convertExponentialHistToExplicitHist(distributionFn string, explicitBounds
     explicitHist.SetAggregationTemporality(metric.ExponentialHistogram().AggregationTemporality())
     // map over each exponential histogram data point and calculate the bucket counts
-    for i := 0; i < dps.Len(); i++ {
+    for i := range dps.Len() {
         expDataPoint := dps.At(i)
         bucketCounts := calculateBucketCounts(expDataPoint, explicitBounds, distFn)
         explicitHistDp := explicitHist.DataPoints().AppendEmpty()
@@ -116,7 +116,7 @@ func calculateBucketCounts(dp pmetric.ExponentialHistogramDataPoint, boundaries
         bucketCounts[0] += zerocount
     }
-    for pos := 0; pos < posB.Len(); pos++ {
+    for pos := range posB.Len() {
         index := dp.Positive().Offset() + int32(pos)
         upper := math.Exp(float64(index+1) * factor)
         lower := math.Exp(float64(index) * factor)
@@ -206,7 +206,7 @@ var uniformAlgorithm distAlgorithm = func(count uint64,
 ) {
     // Find the boundaries that intersect with the bucket range
     var start, end int
-    for start = 0; start < len(boundaries); start++ {
+    for start = range boundaries {
         if lower <= boundaries[start] {
             break
         }
diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
index 4aede94561f58..27d2127a1acf8 100644
--- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
+++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
@@ -711,7 +711,7 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) {
                 dp.BucketCounts().Len())
             var count uint64
-            for i := 0; i < dp.BucketCounts().Len(); i++ {
+            for i := range dp.BucketCounts().Len() {
                 count += dp.BucketCounts().At(i)
             }
@@ -721,7 +721,7 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) {
             // even though the distribution is random, we know that for this
             // particular test case, the min value is 40, therefore the 1st 3 bucket
             // counts should be 0, as they represent values 10 - 30
-            for i := 0; i < 3; i++ {
+            for i := range 3 {
                 assert.Equal(t, uint64(0), dp.BucketCounts().At(i), "bucket %d", i)
             }
diff --git a/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go b/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go
index c329ba34fd0d5..1a84e219dc50b 100644
--- a/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go
+++ b/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go
@@ -57,7 +57,7 @@ func convertSummaryCountValToSum(stringAggTemp string, monotonic bool) (ottl.Exp
         sumDps := sumMetric.Sum().DataPoints()
         dps := metric.Summary().DataPoints()
-        for i := 0; i < dps.Len(); i++ {
+        for i := range dps.Len() {
             dp := dps.At(i)
             sumDp := sumDps.AppendEmpty()
             dp.Attributes().CopyTo(sumDp.Attributes())
diff --git a/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go b/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go
index 1deb6bdb27108..534e3b7165af6 100644
--- a/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go
+++ b/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go
@@ -57,7 +57,7 @@ func convertSummarySumValToSum(stringAggTemp string, monotonic bool) (ottl.ExprF
         sumDps := sumMetric.Sum().DataPoints()
         dps := metric.Summary().DataPoints()
-        for i := 0; i < dps.Len(); i++ {
+        for i := range dps.Len() {
             dp := dps.At(i)
             sumDp := sumDps.AppendEmpty()
             dp.Attributes().CopyTo(sumDp.Attributes())
diff --git a/processor/transformprocessor/internal/metrics/func_extract_count_metric.go b/processor/transformprocessor/internal/metrics/func_extract_count_metric.go
index 314adf0afc24b..1c2511c211b9c 100644
--- a/processor/transformprocessor/internal/metrics/func_extract_count_metric.go
+++ b/processor/transformprocessor/internal/metrics/func_extract_count_metric.go
@@ -53,17 +53,17 @@ func extractCountMetric(monotonic bool) (ottl.ExprFunc[ottlmetric.TransformConte
     switch metric.Type() {
     case pmetric.MetricTypeHistogram:
         dataPoints := metric.Histogram().DataPoints()
-        for i := 0; i < dataPoints.Len(); i++ {
+        for i := range dataPoints.Len() {
             addCountDataPoint(dataPoints.At(i), countMetric.Sum().DataPoints())
         }
     case pmetric.MetricTypeExponentialHistogram:
         dataPoints := metric.ExponentialHistogram().DataPoints()
-        for i := 0; i < dataPoints.Len(); i++ {
+        for i := range dataPoints.Len() {
             addCountDataPoint(dataPoints.At(i), countMetric.Sum().DataPoints())
         }
     case pmetric.MetricTypeSummary:
         dataPoints := metric.Summary().DataPoints()
-        for i := 0; i < dataPoints.Len(); i++ {
+        for i := range dataPoints.Len() {
             addCountDataPoint(dataPoints.At(i), countMetric.Sum().DataPoints())
         }
     default:
diff --git a/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go b/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go
index 6c7d621acba81..72ec930b3b987 100644
--- a/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go
+++ b/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go
@@ -62,7 +62,7 @@ func extractSumMetric(monotonic bool) (ottl.ExprFunc[ottlmetric.TransformContext
     switch metric.Type() {
     case pmetric.MetricTypeHistogram:
         dataPoints := metric.Histogram().DataPoints()
-        for i := 0; i < dataPoints.Len(); i++ {
+        for i := range dataPoints.Len() {
             dataPoint := dataPoints.At(i)
             if dataPoint.HasSum() {
                 addSumDataPoint(dataPoint, sumMetric.Sum().DataPoints())
@@ -70,7 +70,7 @@ func extractSumMetric(monotonic bool) (ottl.ExprFunc[ottlmetric.TransformContext
     case pmetric.MetricTypeExponentialHistogram:
         dataPoints := metric.ExponentialHistogram().DataPoints()
-        for i := 0; i < dataPoints.Len(); i++ {
+        for i := range dataPoints.Len() {
             dataPoint := dataPoints.At(i)
             if dataPoint.HasSum() {
                 addSumDataPoint(dataPoint, sumMetric.Sum().DataPoints())
@@ -79,7 +79,7 @@ func extractSumMetric(monotonic bool) (ottl.ExprFunc[ottlmetric.TransformContext
     case pmetric.MetricTypeSummary:
         dataPoints := metric.Summary().DataPoints()
         // note that unlike Histograms, the Sum field is required for Summaries
-        for i := 0; i < dataPoints.Len(); i++ {
+        for i := range dataPoints.Len() {
             addSumDataPoint(dataPoints.At(i), sumMetric.Sum().DataPoints())
         }
     default:
diff --git a/processor/transformprocessor/internal/metrics/func_scale.go b/processor/transformprocessor/internal/metrics/func_scale.go
index 60ad4305db9bf..d21a49cf026f6 100644
--- a/processor/transformprocessor/internal/metrics/func_scale.go
+++ b/processor/transformprocessor/internal/metrics/func_scale.go
@@ -78,12 +78,12 @@ func scaleExemplar(ex *pmetric.Exemplar, multiplier float64) {
 }
 func scaleSummarySlice(values pmetric.SummaryDataPointSlice, multiplier float64) {
-    for i := 0; i < values.Len(); i++ {
+    for i := range values.Len() {
         dp := values.At(i)
         dp.SetSum(dp.Sum() * multiplier)
-        for i := 0; i < dp.QuantileValues().Len(); i++ {
+        for i := range dp.QuantileValues().Len() {
             qv := dp.QuantileValues().At(i)
             qv.SetValue(qv.Value() * multiplier)
         }
@@ -91,7 +91,7 @@ func scaleSummarySlice(values pmetric.SummaryDataPointSlice, multiplier float64)
 }
 func scaleHistogram(datapoints pmetric.HistogramDataPointSlice, multiplier float64) {
-    for i := 0; i < datapoints.Len(); i++ {
+    for i := range datapoints.Len() {
         dp := datapoints.At(i)
         if dp.HasSum() {
@@ -116,7 +116,7 @@ func scaleHistogram(datapoints pmetric.HistogramDataPointSlice, multiplier float
 }
 func scaleMetric(points pmetric.NumberDataPointSlice, multiplier float64) {
-    for i := 0; i < points.Len(); i++ {
+    for i := range points.Len() {
         dp := points.At(i)
         switch dp.ValueType() {
         case pmetric.NumberDataPointValueTypeInt:
diff --git a/processor/transformprocessor/internal/metrics/func_scale_test.go b/processor/transformprocessor/internal/metrics/func_scale_test.go
index 7352532582923..ec2e4c7010e16 100644
--- a/processor/transformprocessor/internal/metrics/func_scale_test.go
+++ b/processor/transformprocessor/internal/metrics/func_scale_test.go
@@ -193,7 +193,7 @@ func getTestScalingHistogramMetric(count uint64, sum, minVal, maxVal float64, bo
     histogramDatapoint.SetMax(maxVal)
     histogramDatapoint.ExplicitBounds().FromRaw(bounds)
     histogramDatapoint.BucketCounts().FromRaw(bucketCounts)
-    for i := 0; i < len(exemplars); i++ {
+    for i := range exemplars {
         exemplar := histogramDatapoint.Exemplars().AppendEmpty()
         exemplar.SetTimestamp(1)
         exemplar.SetDoubleValue(exemplars[i])
diff --git a/processor/transformprocessor/internal/traces/processor_test.go b/processor/transformprocessor/internal/traces/processor_test.go
index 005280c062dd6..9a8c3c53caec1 100644
--- a/processor/transformprocessor/internal/traces/processor_test.go
+++ b/processor/transformprocessor/internal/traces/processor_test.go
@@ -1385,7 +1385,7 @@ func BenchmarkTwoSpans(b *testing.B) {
             processor, err := NewProcessor([]common.ContextStatements{{Context: "span", Statements: tt.statements}}, ottl.IgnoreError, componenttest.NewNopTelemetrySettings())
             assert.NoError(b, err)
             b.ResetTimer()
-            for n := 0; n < b.N; n++ {
+            for range b.N {
                 td := constructTraces()
                 _, err = processor.ProcessTraces(context.Background(), td)
                 assert.NoError(b, err)
@@ -1415,7 +1415,7 @@ func BenchmarkHundredSpans(b *testing.B) {
             statements: func() []string {
                 var statements []string
                 statements = append(statements, `set(status.code, 1) where name == "operationA"`)
-                for i := 0; i < 99; i++ {
+                for range 99 {
                     statements = append(statements, `keep_keys(attributes, ["http.method"]) where name == "unknownOperation"`)
                 }
                 return statements
@@ -1427,7 +1427,7 @@ func BenchmarkHundredSpans(b *testing.B) {
             processor, err := NewProcessor([]common.ContextStatements{{Context: "span", Statements: tt.statements}}, ottl.IgnoreError, componenttest.NewNopTelemetrySettings())
             assert.NoError(b, err)
             b.ResetTimer()
-            for n := 0; n < b.N; n++ {
+            for range b.N {
                 td := constructTracesNum(100)
                 _, err = processor.ProcessTraces(context.Background(), td)
                 assert.NoError(b, err)
@@ -1453,7 +1453,7 @@ func constructTracesNum(num int) ptrace.Traces {
     td := ptrace.NewTraces()
     rs0 := td.ResourceSpans().AppendEmpty()
     rs0ils0 := rs0.ScopeSpans().AppendEmpty()
-    for i := 0; i < num; i++ {
+    for range num {
         fillSpanOne(rs0ils0.Spans().AppendEmpty())
     }
     return td
diff --git a/processor/transformprocessor/processor_test.go b/processor/transformprocessor/processor_test.go
index d00fc9a1037f9..262c40b108c6a 100644
--- a/processor/transformprocessor/processor_test.go
+++ b/processor/transformprocessor/processor_test.go
@@ -115,7 +115,7 @@ func BenchmarkLogsWithoutFlatten(b *testing.B) {
     input, err := golden.ReadLogs(filepath.Join("testdata", "logs", "input.yaml"))
     require.NoError(b, err)
-    for n := 0; n < b.N; n++ {
+    for range b.N {
         assert.NoError(b, p.ConsumeLogs(context.Background(), input))
     }
 }
@@ -141,7 +141,7 @@ func BenchmarkLogsWithFlatten(b *testing.B) {
     input, err := golden.ReadLogs(filepath.Join("testdata", "logs", "input.yaml"))
     require.NoError(b, err)
-    for n := 0; n < b.N; n++ {
+    for range b.N {
         assert.NoError(b, p.ConsumeLogs(context.Background(), input))
     }
 }
diff --git a/receiver/aerospikereceiver/integration_test.go b/receiver/aerospikereceiver/integration_test.go
index 567bd13a003a3..0204db519ff8d 100644
--- a/receiver/aerospikereceiver/integration_test.go
+++ b/receiver/aerospikereceiver/integration_test.go
@@ -175,7 +175,7 @@ func populateMetrics(host *as.Host) error {
     sibin := "bin2"
     // write 100 records to get some memory usage
-    for i := 0; i < 100; i++ {
+    for i := range 100 {
         var key *as.Key
         key, err = as.NewKey(ns, set, i)
         if err != nil {
diff --git a/receiver/awscloudwatchreceiver/logs_test.go b/receiver/awscloudwatchreceiver/logs_test.go
index e1558eba9316f..c6535bd613e0c 100644
--- a/receiver/awscloudwatchreceiver/logs_test.go
+++ b/receiver/awscloudwatchreceiver/logs_test.go
@@ -206,7 +206,7 @@ func TestAutodiscoverLimit(t *testing.T) {
     mc := &mockClient{}
     logGroups := []*cloudwatchlogs.LogGroup{}
-    for i := 0; i <= 100; i++ {
+    for i := range 101 {
         logGroups = append(logGroups, &cloudwatchlogs.LogGroup{
             LogGroupName: aws.String(fmt.Sprintf("test log group: %d", i)),
         })
diff --git a/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver_test.go b/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver_test.go
index fcacd2dd33fb8..bcdf18e9c0e00 100644
--- a/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver_test.go
+++ b/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver_test.go
@@ -113,9 +113,9 @@ func assertMetricValueEqual(t *testing.T, m pmetric.Metrics, metricName string,
     rm := m.ResourceMetrics().At(0)
     ilms := rm.ScopeMetrics()
-    for j := 0; j < ilms.Len(); j++ {
+    for j := range ilms.Len() {
         metricSlice := ilms.At(j).Metrics()
-        for i := 0; i < metricSlice.Len(); i++ {
+        for i := range metricSlice.Len() {
             metric := metricSlice.At(i)
             if metric.Name() == metricName {
                 if metric.Type() == pmetric.MetricTypeGauge {
diff --git a/receiver/awsfirehosereceiver/benchmark_test.go b/receiver/awsfirehosereceiver/benchmark_test.go
index 32c485a07cf53..dcb1c57e94e13 100644
--- a/receiver/awsfirehosereceiver/benchmark_test.go
+++ b/receiver/awsfirehosereceiver/benchmark_test.go
@@ -64,7 +64,7 @@ func BenchmarkLogsConsumer_cwlogs(b *testing.B) {
     require.NoError(b, err)
     b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for range b.N {
         req := newTestRequest(body)
         recorder := httptest.NewRecorder()
         r.(http.Handler).ServeHTTP(recorder, req)
@@ -120,7 +120,7 @@ func BenchmarkMetricsConsumer_cwmetrics(b *testing.B) {
     require.NoError(b, err)
     b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for range b.N {
         req := newTestRequest(body)
         recorder := httptest.NewRecorder()
         r.(http.Handler).ServeHTTP(recorder, req)
@@ -136,7 +136,7 @@ func BenchmarkMetricsConsumer_cwmetrics(b *testing.B) {
 func makeCloudWatchLogRecord(rng *rand.Rand, numLogs, numLogGroups int) []byte {
     var buf bytes.Buffer
     w := gzip.NewWriter(&buf)
-    for i := 0; i < numLogs; i++ {
+    for i := range numLogs {
         group := rng.IntN(numLogGroups)
         fmt.Fprintf(w, `{"messageType":"DATA_MESSAGE","owner":"123","logGroup":"group_%d","logStream":"stream","logEvents":[{"id":"the_id","timestamp":1725594035523,"message":"message %d"}]}`,
@@ -152,7 +152,7 @@ func makeCloudWatchLogRecord(rng *rand.Rand, numLogs, numLogGroups int) []byte {
 func makeCloudWatchMetricRecord(rng *rand.Rand, numMetrics, numStreams int) []byte {
     var buf bytes.Buffer
-    for i := 0; i < numMetrics; i++ {
+    for i := range numMetrics {
         stream := rng.IntN(numStreams)
         fmt.Fprintf(&buf, `{"metric_stream_name":"stream_%d","account_id":"1234567890","region":"us-east-1","namespace":"AWS/NATGateway","metric_name":"metric_%d","dimensions":{"NatGatewayId":"nat-01a4160dfb995b990"},"timestamp":1643916720000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}`,
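One bound deserves care: TestAutodiscoverLimit above used an inclusive condition, so `for i := 0; i <= 100; i++` becomes `for i := range 101`, since range-over-int covers the half-open interval [0, n). A sketch (illustrative only):

    package main

    import "fmt"

    func main() {
        last := -1
        // range 101 yields 0 through 100 inclusive.
        for i := range 101 {
            last = i
        }
        fmt.Println(last) // 100
    }

A related caveat for hunks such as `for start = range boundaries` in func_convert_exponential_hist_to_explicit_hist.go above: if such a loop finishes without a break, the variable is left at len(boundaries)-1, while the classic loop would leave it at len(boundaries), so conversions that read the variable after the loop rely on the break always firing.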
diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/cwlog/unmarshaler_test.go b/receiver/awsfirehosereceiver/internal/unmarshaler/cwlog/unmarshaler_test.go
index e1e329440327d..1a7d7b597a460 100644
--- a/receiver/awsfirehosereceiver/internal/unmarshaler/cwlog/unmarshaler_test.go
+++ b/receiver/awsfirehosereceiver/internal/unmarshaler/cwlog/unmarshaler_test.go
@@ -69,7 +69,7 @@ func TestUnmarshal(t *testing.T) {
             require.NotNil(t, got)
             require.Equal(t, testCase.wantResourceCount, got.ResourceLogs().Len())
             gotLogCount := 0
-            for i := 0; i < got.ResourceLogs().Len(); i++ {
+            for i := range got.ResourceLogs().Len() {
                 rm := got.ResourceLogs().At(i)
                 require.Equal(t, 1, rm.ScopeLogs().Len())
                 ilm := rm.ScopeLogs().At(0)
diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/unmarshaler_test.go b/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/unmarshaler_test.go
index 49e35e33ae951..21008189bc994 100644
--- a/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/unmarshaler_test.go
+++ b/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/unmarshaler_test.go
@@ -75,12 +75,12 @@ func TestUnmarshal(t *testing.T) {
             require.Equal(t, testCase.wantResourceCount, got.ResourceMetrics().Len())
             gotMetricCount := 0
             gotDatapointCount := 0
-            for i := 0; i < got.ResourceMetrics().Len(); i++ {
+            for i := range got.ResourceMetrics().Len() {
                 rm := got.ResourceMetrics().At(i)
                 require.Equal(t, 1, rm.ScopeMetrics().Len())
                 ilm := rm.ScopeMetrics().At(0)
                 gotMetricCount += ilm.Metrics().Len()
-                for j := 0; j < ilm.Metrics().Len(); j++ {
+                for j := range ilm.Metrics().Len() {
                     metric := ilm.Metrics().At(j)
                     gotDatapointCount += metric.Summary().DataPoints().Len()
                 }
diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/otlpmetricstream/unmarshaler_test.go b/receiver/awsfirehosereceiver/internal/unmarshaler/otlpmetricstream/unmarshaler_test.go
index 9e781953b9d4e..8c9a4e60e32d1 100644
--- a/receiver/awsfirehosereceiver/internal/unmarshaler/otlpmetricstream/unmarshaler_test.go
+++ b/receiver/awsfirehosereceiver/internal/unmarshaler/otlpmetricstream/unmarshaler_test.go
@@ -97,12 +97,12 @@ func TestUnmarshal(t *testing.T) {
             require.Equal(t, testCase.wantResourceCount, got.ResourceMetrics().Len())
             gotMetricCount := 0
             gotDatapointCount := 0
-            for i := 0; i < got.ResourceMetrics().Len(); i++ {
+            for i := range got.ResourceMetrics().Len() {
                 rm := got.ResourceMetrics().At(i)
                 require.Equal(t, 1, rm.ScopeMetrics().Len())
                 ilm := rm.ScopeMetrics().At(0)
                 gotMetricCount += ilm.Metrics().Len()
-                for j := 0; j < ilm.Metrics().Len(); j++ {
+                for j := range ilm.Metrics().Len() {
                     metric := ilm.Metrics().At(j)
                     gotDatapointCount += metric.Summary().DataPoints().Len()
                 }
diff --git a/receiver/awsfirehosereceiver/logs_receiver.go b/receiver/awsfirehosereceiver/logs_receiver.go
index 98e5c49f9bcc2..6b2ca7a84edcd 100644
--- a/receiver/awsfirehosereceiver/logs_receiver.go
+++ b/receiver/awsfirehosereceiver/logs_receiver.go
@@ -75,7 +75,7 @@ func (c *logsConsumer) Consume(ctx context.Context, nextRecord nextRecordFunc, c
     }
     if commonAttributes != nil {
-        for i := 0; i < logs.ResourceLogs().Len(); i++ {
+        for i := range logs.ResourceLogs().Len() {
             rm := logs.ResourceLogs().At(i)
             for k, v := range commonAttributes {
                 if _, found := rm.Resource().Attributes().Get(k); !found {
diff --git a/receiver/awsfirehosereceiver/metrics_receiver.go b/receiver/awsfirehosereceiver/metrics_receiver.go
index 05e356c4873e5..b4e7bbe171568 100644
--- a/receiver/awsfirehosereceiver/metrics_receiver.go
+++ b/receiver/awsfirehosereceiver/metrics_receiver.go
@@ -77,7 +77,7 @@ func (c *metricsConsumer) Consume(ctx context.Context, nextRecord nextRecordFunc
     }
     if commonAttributes != nil {
-        for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
+        for i := range metrics.ResourceMetrics().Len() {
             rm := metrics.ResourceMetrics().At(i)
             for k, v := range commonAttributes {
                 if _, found := rm.Resource().Attributes().Get(k); !found {
diff --git a/receiver/awss3receiver/notifications.go b/receiver/awss3receiver/notifications.go
index d30d0572baabb..cb2e37eedc086 100644
--- a/receiver/awss3receiver/notifications.go
+++ b/receiver/awss3receiver/notifications.go
@@ -103,7 +103,7 @@ func (n *opampNotifier) SendStatus(_ context.Context, message statusNotification
     if err != nil {
         return
     }
-    for attempt := 0; attempt < maxNotificationAttempts; attempt++ {
+    for attempt := range maxNotificationAttempts {
         sendingChan, sendingErr := n.handler.SendMessage("TimeBasedIngestStatus", bytes)
         switch {
         case sendingErr == nil:
diff --git a/receiver/awss3receiver/notifications_test.go b/receiver/awss3receiver/notifications_test.go
index 981358776a141..60bf72f4bffe8 100644
--- a/receiver/awss3receiver/notifications_test.go
+++ b/receiver/awss3receiver/notifications_test.go
@@ -268,7 +268,7 @@ func Test_opampNotifier_SendStatus_MaxRetries(t *testing.T) {
     }()
     require.Empty(t, doneChan)
-    for attempt := 0; attempt < maxNotificationAttempts; attempt++ {
+    for range maxNotificationAttempts {
         registry.pendingChannel <- struct{}{}
     }
     <-doneChan
diff --git a/receiver/awsxrayreceiver/internal/udppoller/poller.go b/receiver/awsxrayreceiver/internal/udppoller/poller.go
index d604bf95ef460..6acfd2d377ac5 100644
--- a/receiver/awsxrayreceiver/internal/udppoller/poller.go
+++ b/receiver/awsxrayreceiver/internal/udppoller/poller.go
@@ -117,7 +117,7 @@ func New(cfg *Config, set receiver.Settings) (Poller, error) {
 func (p *poller) Start(receiverLongTermCtx context.Context) {
     p.receiverLongLivedCtx = receiverLongTermCtx
-    for i := 0; i < p.maxPollerCount; i++ {
+    for range p.maxPollerCount {
         p.wg.Add(1)
         go p.poll()
     }
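Range-over-int is not limited to int: any integer type works, which is why the span_count_sampler_test.go hunk earlier could drop the int() conversion and range over an int32 directly, and why the fluentforwardreceiver conversion.go hunk further down ranges over entryLen the same way. A sketch (illustrative only):

    package main

    import "fmt"

    func main() {
        var n int32 = 4
        count := 0
        // The int32 value is ranged over directly; no conversion needed.
        for range n {
            count++
        }
        fmt.Println(count) // 4
    }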
diff --git a/receiver/carbonreceiver/internal/client/plaintext_client.go b/receiver/carbonreceiver/internal/client/plaintext_client.go
index d40a3491b2322..42fad4366b0f7 100644
--- a/receiver/carbonreceiver/internal/client/plaintext_client.go
+++ b/receiver/carbonreceiver/internal/client/plaintext_client.go
@@ -103,7 +103,7 @@ func (g *Graphite) SendMetric(metric Metric) error {
 // SputterThenSendMetric method sends a bad partial metric, then the whole metric across.
 func (g *Graphite) SputterThenSendMetric(metric Metric) error {
     str := metric.String()
-    for i := 0; i < 5; i++ {
+    for range 5 {
         if _, err := fmt.Fprint(g.Conn, ""); err != nil {
             return err
         }
diff --git a/receiver/carbonreceiver/protocol/regex_parser_test.go b/receiver/carbonreceiver/protocol/regex_parser_test.go
index ef5f63eba5473..bd4deca5f7474 100644
--- a/receiver/carbonreceiver/protocol/regex_parser_test.go
+++ b/receiver/carbonreceiver/protocol/regex_parser_test.go
@@ -221,8 +221,8 @@ func Benchmark_regexPathParser_ParsePath(b *testing.B) {
     res.metricType = got.MetricType
     res.err = err
-    for n := 0; n < b.N; n++ {
-        for i := 0; i < len(tests); i++ {
+    for range b.N {
+        for i := range tests {
             err = rp.ParsePath(tests[i], &got)
         }
     }
diff --git a/receiver/cloudfoundryreceiver/receiver.go b/receiver/cloudfoundryreceiver/receiver.go
index c5d3deb06d3b3..e90ba7b4d3bcd 100644
--- a/receiver/cloudfoundryreceiver/receiver.go
+++ b/receiver/cloudfoundryreceiver/receiver.go
@@ -250,7 +250,7 @@ func getResourceMetrics(metrics pmetric.Metrics, envelope *loggregator_v2.Envelo
     }
     attrs := getEnvelopeResourceAttributes(envelope)
-    for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
+    for i := range metrics.ResourceMetrics().Len() {
         if reflect.DeepEqual(metrics.ResourceMetrics().At(i).Resource().Attributes().AsRaw(), attrs.AsRaw()) {
             return metrics.ResourceMetrics().At(i)
         }
@@ -273,7 +273,7 @@ func getResourceLogs(logs plog.Logs, envelope *loggregator_v2.Envelope) plog.Res
     }
     attrs := getEnvelopeResourceAttributes(envelope)
-    for i := 0; i < logs.ResourceLogs().Len(); i++ {
+    for i := range logs.ResourceLogs().Len() {
         if reflect.DeepEqual(logs.ResourceLogs().At(i).Resource().Attributes().AsRaw(), attrs.AsRaw()) {
             return logs.ResourceLogs().At(i)
         }
diff --git a/receiver/collectdreceiver/receiver_test.go b/receiver/collectdreceiver/receiver_test.go
index 0d5a26967c29c..6e3341e5871ac 100644
--- a/receiver/collectdreceiver/receiver_test.go
+++ b/receiver/collectdreceiver/receiver_test.go
@@ -212,7 +212,7 @@ func createWantedMetrics(wantedRequestBody wantedBody) pmetric.Metrics {
 }
 func assertMetricsAreEqual(t *testing.T, expectedData []pmetric.Metrics, actualData []pmetric.Metrics) {
-    for i := 0; i < len(expectedData); i++ {
+    for i := range expectedData {
         err := pmetrictest.CompareMetrics(expectedData[i], actualData[i])
         require.NoError(t, err)
     }
diff --git a/receiver/couchdbreceiver/metrics.go b/receiver/couchdbreceiver/metrics.go
index f66bf8e9a954e..779e6f74877b0 100644
--- a/receiver/couchdbreceiver/metrics.go
+++ b/receiver/couchdbreceiver/metrics.go
@@ -134,7 +134,7 @@ func (c *couchdbScraper) recordCouchdbFileDescriptorOpenDataPoint(now pcommon.Ti
 func (c *couchdbScraper) recordCouchdbDatabaseOperationsDataPoint(now pcommon.Timestamp, stats map[string]any, errs *scrapererror.ScrapeErrors) {
     operations := []metadata.AttributeOperation{metadata.AttributeOperationReads, metadata.AttributeOperationWrites}
     keyPaths := [][]string{{"database_reads", "value"}, {"database_writes", "value"}}
-    for i := 0; i < len(operations); i++ {
+    for i := range operations {
         key := keyPaths[i]
         value, err := getValueFromBody(key, stats)
         if err != nil {
diff --git a/receiver/datadogreceiver/internal/translator/service_check_translator_test.go b/receiver/datadogreceiver/internal/translator/service_check_translator_test.go
index 84f9fbda52915..46819e13cc570 100644
--- a/receiver/datadogreceiver/internal/translator/service_check_translator_test.go
+++ b/receiver/datadogreceiver/internal/translator/service_check_translator_test.go
@@ -311,7 +311,7 @@ func TestTranslateCheckRunStatuses(t *testing.T) {
             requireScope(t, result, pcommon.NewMap(), component.NewDefaultBuildInfo().Version)
             metrics := result.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics()
-            for i := 0; i < metrics.Len(); i++ {
+            for i := range metrics.Len() {
                 metric := metrics.At(i)
                 assert.Equal(t, tt.expectedStatus, metric.Gauge().DataPoints().At(0).IntValue())
             }
diff --git a/receiver/datadogreceiver/internal/translator/sketches_test.go b/receiver/datadogreceiver/internal/translator/sketches_test.go
index 7603e80efb7fc..ad81d094573d1 100644
--- a/receiver/datadogreceiver/internal/translator/sketches_test.go
+++ b/receiver/datadogreceiver/internal/translator/sketches_test.go
@@ -395,7 +395,7 @@ func TestHandleInvalidBuckets(t *testing.T) {
     require.Equal(t, 2, metric.At(0).ExponentialHistogram().DataPoints().Len())
     var lastTimestamp pcommon.Timestamp
-    for i := 0; i < metric.At(0).ExponentialHistogram().DataPoints().Len(); i++ {
+    for i := range metric.At(0).ExponentialHistogram().DataPoints().Len() {
         m := metric.At(0).ExponentialHistogram().DataPoints().At(i)
         if i == 0 {
             require.Equal(t, m.StartTimestamp(), pcommon.Timestamp(0))
@@ -515,7 +515,7 @@ func TestSketchTemporality(t *testing.T) {
     require.Equal(t, pmetric.MetricTypeExponentialHistogram, metric2.At(0).Type())
     var lastTimestamp pcommon.Timestamp
-    for i := 0; i < metric1.At(0).ExponentialHistogram().DataPoints().Len(); i++ {
+    for i := range metric1.At(0).ExponentialHistogram().DataPoints().Len() {
         m := metric1.At(0).ExponentialHistogram().DataPoints().At(i)
         if i == 0 {
             require.Equal(t, m.StartTimestamp(), pcommon.Timestamp(0))
@@ -524,7 +524,7 @@ func TestSketchTemporality(t *testing.T) {
         }
         lastTimestamp = m.Timestamp()
     }
-    for i := 0; i < metric2.At(0).ExponentialHistogram().DataPoints().Len(); i++ {
+    for i := range metric2.At(0).ExponentialHistogram().DataPoints().Len() {
         m := metric2.At(0).ExponentialHistogram().DataPoints().At(i)
         if i == 0 {
             require.Equal(t, m.StartTimestamp(), pcommon.Timestamp(0))
diff --git a/receiver/datadogreceiver/internal/translator/stats_translator_test.go b/receiver/datadogreceiver/internal/translator/stats_translator_test.go
index 0577345796f6e..56528e95de085 100644
--- a/receiver/datadogreceiver/internal/translator/stats_translator_test.go
+++ b/receiver/datadogreceiver/internal/translator/stats_translator_test.go
@@ -101,14 +101,14 @@ func TestTranslateStats(t *testing.T) {
             assert.NoError(t, err)
             var results []*pb.StatsPayload
-            for i := 0; i < mx.ResourceMetrics().Len(); i++ {
+            for i := range mx.ResourceMetrics().Len() {
                 rm := mx.ResourceMetrics().At(i)
-                for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+                for j := range rm.ScopeMetrics().Len() {
                     sm := rm.ScopeMetrics().At(j)
-                    for k := 0; k < sm.Metrics().Len(); k++ {
+                    for k := range sm.Metrics().Len() {
                         md := sm.Metrics().At(k)
                         // these metrics are an APM Stats payload; consume it as such
-                        for l := 0; l < md.Sum().DataPoints().Len(); l++ {
+                        for l := range md.Sum().DataPoints().Len() {
                             if payload, ok := md.Sum().DataPoints().At(l).Attributes().Get(keyStatsPayload); ok {
                                 stats := &pb.StatsPayload{}
                                 err = proto.Unmarshal(payload.Bytes().AsRaw(), stats)
diff --git a/receiver/datadogreceiver/internal/translator/testutil.go b/receiver/datadogreceiver/internal/translator/testutil.go
index c09bfe1656423..90348ed16c97c 100644
--- a/receiver/datadogreceiver/internal/translator/testutil.go
+++ b/receiver/datadogreceiver/internal/translator/testutil.go
@@ -71,12 +71,12 @@ func requireDp(t *testing.T, dp pmetric.NumberDataPoint, expectedAttrs pcommon.M
 func totalHistBucketCounts(hist pmetric.ExponentialHistogramDataPoint) uint64 {
     var totalCount uint64
-    for i := 0; i < hist.Negative().BucketCounts().Len(); i++ {
+    for i := range hist.Negative().BucketCounts().Len() {
         totalCount += hist.Negative().BucketCounts().At(i)
     }
     totalCount += hist.ZeroCount()
-    for i := 0; i < hist.Positive().BucketCounts().Len(); i++ {
+    for i := range hist.Positive().BucketCounts().Len() {
         totalCount += hist.Positive().BucketCounts().At(i)
     }
     return totalCount
diff --git a/receiver/datadogreceiver/internal/translator/traces_translator.go b/receiver/datadogreceiver/internal/translator/traces_translator.go
index 2ccad7ed53989..373e5b195da8d 100644
--- a/receiver/datadogreceiver/internal/translator/traces_translator.go
+++ b/receiver/datadogreceiver/internal/translator/traces_translator.go
@@ -190,7 +190,7 @@ func tagsToSpanLinks(tags map[string]string, dest ptrace.SpanLinkSlice) error {
         return err
     }
-    for i := 0; i < len(spans); i++ {
+    for i := range spans {
         span := spans[i]
         link := dest.AppendEmpty()
diff --git a/receiver/datadogreceiver/internal/translator/traces_translator_test.go b/receiver/datadogreceiver/internal/translator/traces_translator_test.go
index f1ad7014d7c44..126796baf268d 100644
--- a/receiver/datadogreceiver/internal/translator/traces_translator_test.go
+++ b/receiver/datadogreceiver/internal/translator/traces_translator_test.go
@@ -146,7 +146,7 @@ func TestTracePayloadV07Unmarshalling(t *testing.T) {
 func BenchmarkTranslatorv05(b *testing.B) {
     b.StartTimer()
-    for n := 0; n < b.N; n++ {
+    for range b.N {
         TestTracePayloadV05Unmarshalling(&testing.T{})
     }
     b.StopTimer()
@@ -154,7 +154,7 @@ func BenchmarkTranslatorv05(b *testing.B) {
 func BenchmarkTranslatorv07(b *testing.B) {
     b.StartTimer()
-    for n := 0; n < b.N; n++ {
+    for range b.N {
         TestTracePayloadV07Unmarshalling(&testing.T{})
     }
     b.StopTimer()
@@ -186,7 +186,7 @@ func TestTracePayloadApiV02Unmarshalling(t *testing.T) {
 func agentPayloadFromTraces(traces *pb.Traces) (agentPayload pb.AgentPayload) {
     numberOfTraces := 2
     var tracerPayloads []*pb.TracerPayload
-    for i := 0; i < numberOfTraces; i++ {
+    for i := range numberOfTraces {
         payload := &pb.TracerPayload{
             LanguageName:    strconv.Itoa(i),
             LanguageVersion: strconv.Itoa(i),
diff --git a/receiver/dockerstatsreceiver/integration_test.go b/receiver/dockerstatsreceiver/integration_test.go
index 571a2cb6e07e9..ca991e716dc70 100644
--- a/receiver/dockerstatsreceiver/integration_test.go
+++ b/receiver/dockerstatsreceiver/integration_test.go
@@ -59,7 +59,7 @@ func createNginxContainer(ctx context.Context, t *testing.T) testcontainers.Cont
 func hasResourceScopeMetrics(containerID string, metrics []pmetric.Metrics) bool {
     for _, m := range metrics {
-        for i := 0; i < m.ResourceMetrics().Len(); i++ {
+        for i := range m.ResourceMetrics().Len() {
             rm := m.ResourceMetrics().At(i)
             id, ok := rm.Resource().Attributes().Get(conventions.AttributeContainerID)
diff --git a/receiver/filelogreceiver/filelog_test.go b/receiver/filelogreceiver/filelog_test.go
index 976e6d8d82147..d1af7dc4a29bd 100644
--- a/receiver/filelogreceiver/filelog_test.go
+++ b/receiver/filelogreceiver/filelog_test.go
@@ -185,7 +185,7 @@ func (rt *rotationTest) Run(t *testing.T) {
     }()
     require.NoError(t, err)
-    for i := 0; i < numLogs; i++ {
+    for i := range numLogs {
         if (i+1)%maxLinesPerFile == 0 {
             if rt.copyTruncate {
                 // Recreate the backup file
diff --git a/receiver/filelogreceiver/storage_test.go b/receiver/filelogreceiver/storage_test.go
index 67d9fcc1a1a18..844ebf15f2448 100644
--- a/receiver/filelogreceiver/storage_test.go
+++ b/receiver/filelogreceiver/storage_test.go
@@ -185,11 +185,11 @@ func expectLogs(sink *consumertest.LogsSink, expected []string) func() bool {
         for _, logs := range sink.AllLogs() {
             rl := logs.ResourceLogs()
-            for i := 0; i < rl.Len(); i++ {
+            for i := range rl.Len() {
                 sl := rl.At(i).ScopeLogs()
-                for j := 0; j < sl.Len(); j++ {
+                for j := range sl.Len() {
                     lrs := sl.At(j).LogRecords()
-                    for k := 0; k < lrs.Len(); k++ {
+                    for k := range lrs.Len() {
                         body := lrs.At(k).Body().Str()
                         found[body] = true
                     }
diff --git a/receiver/fluentforwardreceiver/conversion.go b/receiver/fluentforwardreceiver/conversion.go
index 7ede36943b49d..a7ffc1d9c33e3 100644
--- a/receiver/fluentforwardreceiver/conversion.go
+++ b/receiver/fluentforwardreceiver/conversion.go
@@ -275,7 +275,7 @@ func (fe *ForwardEventLogRecords) DecodeMsg(dc *msgp.Reader) error {
     }
     fe.LogRecordSlice.EnsureCapacity(int(entryLen))
-    for i := 0; i < int(entryLen); i++ {
+    for i := range entryLen {
         lr := fe.LogRecordSlice.AppendEmpty()
         err = parseEntryToLogRecord(dc, lr)
diff --git a/receiver/fluentforwardreceiver/conversion_test.go b/receiver/fluentforwardreceiver/conversion_test.go
index b566ad9556411..867aa05de598a 100644
--- a/receiver/fluentforwardreceiver/conversion_test.go
+++ b/receiver/fluentforwardreceiver/conversion_test.go
@@ -142,7 +142,7 @@ func TestMessageEventConversionWithErrors(t *testing.T) {
     b = msgp.AppendString(b, "a")
    b = msgp.AppendFloat64(b, 5.0)
-    for i := 0; i < len(b)-1; i++ {
+    for i := range len(b) - 1 {
         t.Run(fmt.Sprintf("EOF at byte %d", i), func(t *testing.T) {
             reader := msgp.NewReader(bytes.NewReader(b[:i]))
@@ -156,7 +156,7 @@ func TestMessageEventConversionWithErrors(t *testing.T) {
 func TestForwardEventConversionWithErrors(t *testing.T) {
     b := parseHexDump("testdata/forward-event")
-    for i := 0; i < len(b)-1; i++ {
+    for i := range len(b) - 1 {
         t.Run(fmt.Sprintf("EOF at byte %d", i), func(t *testing.T) {
             reader := msgp.NewReader(bytes.NewReader(b[:i]))
@@ -170,7 +170,7 @@ func TestForwardEventConversionWithErrors(t *testing.T) {
 func TestPackedForwardEventConversionWithErrors(t *testing.T) {
     b := parseHexDump("testdata/forward-packed-compressed")
-    for i := 0; i < len(b)-1; i++ {
+    for i := range len(b) - 1 {
         t.Run(fmt.Sprintf("EOF at byte %d", i), func(t *testing.T) {
             reader := msgp.NewReader(bytes.NewReader(b[:i]))
diff --git a/receiver/fluentforwardreceiver/receiver_test.go b/receiver/fluentforwardreceiver/receiver_test.go
index d90e69296bbc5..0231dff5ab05c 100644
--- a/receiver/fluentforwardreceiver/receiver_test.go
+++ b/receiver/fluentforwardreceiver/receiver_test.go
@@ -375,11 +375,11 @@ func TestHighVolume(t *testing.T) {
     const totalMessagesPerRoutine = 1000
     var wg sync.WaitGroup
-    for i := 0; i < totalRoutines; i++ {
+    for i := range totalRoutines {
         wg.Add(1)
         go func(num int) {
             conn := connect()
-            for j := 0; j < totalMessagesPerRoutine; j++ {
+            for j := range totalMessagesPerRoutine {
                 eventBytes := makeSampleEvent(fmt.Sprintf("tag-%d-%d", num, j))
                 n, err := conn.Write(eventBytes)
                 assert.NoError(t, err)
diff --git a/receiver/githubreceiver/internal/scraper/githubscraper/helpers.go b/receiver/githubreceiver/internal/scraper/githubscraper/helpers.go
index b926e2bcb0c30..45dc3385a0cc5 100644
--- a/receiver/githubreceiver/internal/scraper/githubscraper/helpers.go
+++ b/receiver/githubreceiver/internal/scraper/githubscraper/helpers.go
@@ -255,7 +255,7 @@ func (ghs *githubScraper) evalCommits(
         oldest := node[len(node)-1].GetCommittedDate()
         age = int64(time.Since(oldest).Seconds())
     }
-    for b := 0; b < len(c.Nodes); b++ {
+    for b := range len(c.Nodes) {
         additions += c.Nodes[b].Additions
         deletions += c.Nodes[b].Deletions
     }
diff --git a/receiver/githubreceiver/trace_event_handling_test.go b/receiver/githubreceiver/trace_event_handling_test.go
index 7531a2fb89ff2..a19782e731376 100644
--- a/receiver/githubreceiver/trace_event_handling_test.go
+++ b/receiver/githubreceiver/trace_event_handling_test.go
@@ -209,7 +209,7 @@ func TestNewParentSpanID_Consistency(t *testing.T) {
     spanID1, err1 := newParentSpanID(runID, runAttempt)
     require.NoError(t, err1)
-    for i := 0; i < 5; i++ {
+    for range 5 {
         spanID2, err2 := newParentSpanID(runID, runAttempt)
         require.NoError(t, err2)
         require.Equal(t, spanID1, spanID2, "span ID should be consistent across multiple calls")
diff --git a/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go b/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go
index ab7d755ae5820..b3491737b02bd 100644
--- a/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go
+++ b/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go
@@ -37,7 +37,7 @@ const (
 func assertGroupedByKey(t *testing.T, items []*Item, groupedItems map[time.Time][]*Item, key time.Time, offsetInItems int) {
     assert.Len(t, groupedItems[key], 3)
-    for i := 0; i < 3; i++ {
+    for i := range 3 {
         assert.Equal(t, items[i+offsetInItems].SeriesKey, groupedItems[key][i].SeriesKey)
     }
 }
diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go
index 93860b0b97299..fe825df37f493 100644
--- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go
+++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go
@@ -93,7 +93,7 @@ func testMetricsFromDataPointBuilderBuild(t *testing.T, metricDataType pmetric.M
     assert.Equal(t, len(dataForTesting.expectedGroups), metric.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len())
     require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver", metric.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope().Name())
-    for i := 0; i < len(dataForTesting.expectedGroups); i++ {
+    for i := range len(dataForTesting.expectedGroups) {
         ilMetric := metric.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(i)
         expectedGroupingKey := expectedGroupingKeysByMetricName[ilMetric.Name()]
         expectedDataPoints := dataForTesting.expectedGroups[expectedGroupingKey]
diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go
index b95099274bd36..bde0f3effcef5 100644
--- a/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go
+++ b/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go
@@ -68,7 +68,7 @@ func TestPullTimestampsWithDifference(t *testing.T) {
     expectedTimestamp = lowerBound.Add(time.Minute)
-    for i := 0; i < expectedAmountOfTimestamps; i++ {
+    for i := range expectedAmountOfTimestamps {
         assert.Equal(t, expectedTimestamp, timestamps[i])
         expectedTimestamp = expectedTimestamp.Add(time.Minute)
     }
diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go
index 86054bce52a95..818dc7d1dcb16 100644
--- a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go
+++ b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go
@@ -122,7 +122,7 @@ func assertIncludesExpectedMetrics(t *testing.T, got pmetric.Metrics) {
     returnedMetrics := make(map[string]struct{})
     returnedResourceMetrics := make(map[string]struct{})
     rms := got.ResourceMetrics()
-    for i := 0; i < rms.Len(); i++ {
+    for i := range rms.Len() {
         rm := rms.At(i)
         metrics := getMetricSlice(t, rm)
         returnedMetricNames := getReturnedMetricNames(metrics)
@@ -168,7 +168,7 @@ func getMetricSlice(t *testing.T, rm pmetric.ResourceMetrics) pmetric.MetricSlic
 func getReturnedMetricNames(metrics pmetric.MetricSlice) map[string]struct{} {
     metricNames := make(map[string]struct{})
-    for i := 0; i < metrics.Len(); i++ {
+    for i := range metrics.Len() {
         metricNames[metrics.At(i).Name()] = struct{}{}
     }
     return metricNames
@@ -249,7 +249,7 @@ func benchmarkScrapeMetrics(b *testing.B, cfg *Config) {
     require.NoError(b, receiver.Start(context.Background(), componenttest.NewNopHost()))
     b.ResetTimer()
-    for n := 0; n < b.N; n++ {
+    for range b.N {
         tickerCh <- time.Now()
         <-sink.ch
     }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
index 711675aaa9e45..e2b8fa745a76c 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
@@ -123,7 +123,7 @@ func TestScrape(t *testing.T) {
             assert.Equal(t, test.expectMetrics, metrics.Len())
             reportedMetricsCount := map[string]int{}
-            for i := 0; i < metrics.Len(); i++ {
+            for i := range metrics.Len() {
                 metric := metrics.At(i)
                 reportedMetricsCount[metric.Name()]++
                 switch metric.Name() {
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
index f8ad60e8468f4..bcb7c83f51d07 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
@@ -475,7 +475,7 @@ func TestScrape(t *testing.T) {
 }
 func findMetricByName(metrics pmetric.MetricSlice, name string) (pmetric.Metric, error) {
-    for i := 0; i < metrics.Len(); i++ {
+    for i := range metrics.Len() {
         if metrics.At(i).Name() == name {
             return metrics.At(i), nil
         }
@@ -489,7 +489,7 @@ func assertFileSystemUsageMetricValid(
     expectedDeviceDataPoints int,
     expectedDeviceAttributes []map[string]pcommon.Value,
 ) {
-    for i := 0; i < metric.Sum().DataPoints().Len(); i++ {
+    for i := range metric.Sum().DataPoints().Len() {
         for _, label := range []string{"device", "type", "mode", "mountpoint"} {
             internal.AssertSumMetricHasAttribute(t, metric, i, label)
         }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go
index be514dde50c87..ee09bf7dbbae1 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go
@@ -158,7 +158,7 @@ func TestScrape(t *testing.T) {
     // Additional test for average per CPU
     numCPU := runtime.NumCPU()
-    for i := 0; i < results[testStandard].Len(); i++ {
+    for i := range results[testStandard].Len() {
         assertCompareAveragePerCPU(t, results[testAverage].At(i), results[testStandard].At(i), numCPU)
     }
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows_test.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows_test.go
index 2eda3a9fa7789..c459a6fb5d679 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_windows_test.go
@@ -93,7 +93,7 @@ func TestSampleLoad(t *testing.T) {
     samplerInstance = &sampler{perfCounterScraper: mockPerfCounterScraper}
-    for i := 0; i < len(counterReturnValues); i++ {
+    for range counterReturnValues {
         samplerInstance.sampleLoad()
     }
@@ -106,7 +106,7 @@ func calcExpectedLoad(scrapedValues []int64, loadAvgFactor float64) float64 {
     // replicate the calculations that should be performed to determine the exponentially
     // weighted moving averages based on the specified scraped values
     var expectedLoad float64
-    for i := 0; i < len(scrapedValues); i++ {
+    for i := range scrapedValues {
         expectedLoad = expectedLoad*loadAvgFactor + float64(scrapedValues[i])*(1-loadAvgFactor)
     }
     return expectedLoad
@@ -116,7 +116,7 @@ func Benchmark_SampleLoad(b *testing.B) {
     s, _ := newSampler(zap.NewNop())
     b.ResetTimer()
-    for n := 0; n < b.N; n++ {
+    for range b.N {
         s.sampleLoad()
     }
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go
index e5c8526bec566..52b5c707cf12c 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go
@@ -121,7 +121,7 @@ func TestScrape(t *testing.T) {
     metrics := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics()
     memUsageIdx := -1
-    for i := 0; i < md.MetricCount(); i++ {
+    for i := range md.MetricCount() {
         if metrics.At(i).Name() == "system.memory.usage" {
             memUsageIdx = i
         }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
index 1f3a7b4a566a1..187899c85e5ce 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
@@ -129,7 +129,7 @@ func validateRealData(t *testing.T, metrics pmetric.MetricSlice) {
     assertContainsStatus := func(statusVal string) {
         points := countMetric.Sum().DataPoints()
-        for i := 0; i < points.Len(); i++ {
+        for i := range points.Len() {
             v, ok := points.At(i).Attributes().Get("status")
             if ok && v.Str() == statusVal {
                 return
@@ -154,7 +154,7 @@ func validateRealData(t *testing.T, metrics pmetric.MetricSlice) {
 func validateStartTime(t *testing.T, metrics pmetric.MetricSlice) {
     startTime, err := host.BootTime()
     assert.NoError(t, err)
-    for i := 0; i < metricsLength; i++ {
+    for i := range metricsLength {
         internal.AssertSumMetricStartTimeEquals(t, metrics.At(i), pcommon.Timestamp(startTime*1e9))
     }
 }
@@ -196,7 +196,7 @@ func validateFakeData(t *testing.T, metrics pmetric.MetricSlice) {
     points := countMetric.Sum().DataPoints()
     attrs := map[string]int64{}
-    for i := 0; i < points.Len(); i++ {
+    for i
:= range points.Len() { point := points.At(i) val, ok := point.Attributes().Get("status") assert.Truef(t, ok, "Missing status attribute in data point %d", i) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go index 5671d449ea62a..bd344ecb6dd53 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -215,7 +215,7 @@ func (s *processScraper) getProcessMetadata(ctx context.Context) ([]*processMeta } data := make([]*processMetadata, 0, handles.Len()) - for i := 0; i < handles.Len(); i++ { + for i := range handles.Len() { pid := handles.Pid(i) handle := handles.At(i) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go index 4d0d85c5ba5b2..a2bc9b354a2df 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go @@ -200,7 +200,7 @@ func assertValidProcessResourceAttributes(t *testing.T, resourceMetrics pmetric. conventions.AttributeProcessOwner, "process.parent_pid", // TODO: use this from conventions when it is available } - for i := 0; i < resourceMetrics.Len(); i++ { + for i := range resourceMetrics.Len() { attrs := resourceMetrics.At(i).Resource().Attributes().AsRaw() for _, attr := range requiredResourceAttributes { _, ok := attrs[attr] @@ -288,9 +288,9 @@ func assertThreadsCountValid(t *testing.T, resourceMetrics pmetric.ResourceMetri } func assertMetricMissing(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice, expectedMetricName string) { - for i := 0; i < resourceMetrics.Len(); i++ { + for i := range resourceMetrics.Len() { metrics := getMetricSlice(t, resourceMetrics.At(i)) - for j := 0; j < metrics.Len(); j++ { + for j := range metrics.Len() { metric := metrics.At(j) if metric.Name() == expectedMetricName { require.Fail(t, fmt.Sprintf("metric with name %s should not be present", expectedMetricName)) @@ -347,9 +347,9 @@ func assertOpenFileDescriptorMetricValid(t *testing.T, resourceMetrics pmetric.R } func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice) { - for i := 0; i < resourceMetrics.Len(); i++ { + for i := range resourceMetrics.Len() { ilms := resourceMetrics.At(i).ScopeMetrics() - for j := 0; j < ilms.Len(); j++ { + for j := range ilms.Len() { internal.AssertSameTimeStampForAllMetrics(t, ilms.At(j).Metrics()) } } @@ -359,16 +359,16 @@ func assertUptimeMetricValid(t *testing.T, resourceMetrics pmetric.ResourceMetri m := getMetric(t, "process.uptime", resourceMetrics) assert.Equal(t, "process.uptime", m.Name()) - for i := 0; i < m.Gauge().DataPoints().Len(); i++ { + for i := range m.Gauge().DataPoints().Len() { dp := m.Gauge().DataPoints().At(i) assert.Equal(t, float64(199.9), dp.DoubleValue(), "Must have an uptime of 199s") } } func getMetric(t *testing.T, expectedMetricName string, rms pmetric.ResourceMetricsSlice) pmetric.Metric { - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { metrics := getMetricSlice(t, rms.At(i)) - for j := 0; j < metrics.Len(); j++ { + for j := range metrics.Len() { metric := metrics.At(j) if metric.Name() == expectedMetricName { return metric @@ -1354,7 +1354,7 @@ func 
TestScrapeMetrics_CpuUtilizationWhenCpuTimesIsDisabled(t *testing.T) { md, err := scraper.scrape(context.Background()) assert.NoError(t, err) - for k := 0; k < md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len(); k++ { + for k := range md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len() { fmt.Println(md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(k).Name()) } assert.Equal(t, testCase.expectedMetricCount, md.MetricCount()) diff --git a/receiver/hostmetricsreceiver/internal/testutils.go b/receiver/hostmetricsreceiver/internal/testutils.go index 3a34bc1829895..fd39bdbc6c4e7 100644 --- a/receiver/hostmetricsreceiver/internal/testutils.go +++ b/receiver/hostmetricsreceiver/internal/testutils.go @@ -32,7 +32,7 @@ func AssertSumMetricHasAttribute(t *testing.T, metric pmetric.Metric, index int, func AssertSumMetricStartTimeEquals(t *testing.T, metric pmetric.Metric, startTime pcommon.Timestamp) { ddps := metric.Sum().DataPoints() - for i := 0; i < ddps.Len(); i++ { + for i := range ddps.Len() { require.Equal(t, startTime, ddps.At(i).StartTimestamp()) } } @@ -50,7 +50,7 @@ func AssertGaugeMetricHasAttribute(t *testing.T, metric pmetric.Metric, index in func AssertGaugeMetricStartTimeEquals(t *testing.T, metric pmetric.Metric, startTime pcommon.Timestamp) { ddps := metric.Gauge().DataPoints() - for i := 0; i < ddps.Len(); i++ { + for i := range ddps.Len() { require.Equal(t, startTime, ddps.At(i).StartTimestamp()) } } @@ -65,7 +65,7 @@ func AssertSameTimeStampForMetrics(t *testing.T, metrics pmetric.MetricSlice, st metric := metrics.At(i) if metric.Type() == pmetric.MetricTypeSum { ddps := metric.Sum().DataPoints() - for j := 0; j < ddps.Len(); j++ { + for j := range ddps.Len() { if ts == 0 { ts = ddps.At(j).Timestamp() } diff --git a/receiver/jaegerreceiver/jaeger_agent_test.go b/receiver/jaegerreceiver/jaeger_agent_test.go index 06f4a915b889a..06240a0937886 100644 --- a/receiver/jaegerreceiver/jaeger_agent_test.go +++ b/receiver/jaegerreceiver/jaeger_agent_test.go @@ -116,7 +116,7 @@ func testJaegerAgent(t *testing.T, agentEndpoint string, receiverConfig Protocol require.NoError(t, err) t.Cleanup(func() { require.NoError(t, jr.Shutdown(context.Background())) }) - for i := 0; i < 3; i++ { + for range 3 { err = jr.Start(context.Background(), componenttest.NewNopHost()) if err == nil { break diff --git a/receiver/k8sclusterreceiver/internal/metadata/entities_test.go b/receiver/k8sclusterreceiver/internal/metadata/entities_test.go index 9602d826bbac1..0fcda8b77fc0f 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/entities_test.go +++ b/receiver/k8sclusterreceiver/internal/metadata/entities_test.go @@ -190,7 +190,7 @@ func Test_GetEntityEvents(t *testing.T) { timestamp := pcommon.NewTimestampFromTime(time.Now()) events := GetEntityEvents(tt.old, tt.new, timestamp, 1*time.Hour) require.Equal(t, tt.events.Len(), events.Len()) - for i := 0; i < events.Len(); i++ { + for i := range events.Len() { actual := events.At(i) expected := tt.events.At(i) assert.EqualValues(t, timestamp, actual.Timestamp()) diff --git a/receiver/k8sclusterreceiver/mock_resources_test.go b/receiver/k8sclusterreceiver/mock_resources_test.go index 3440cf569b27c..82dcf56298150 100644 --- a/receiver/k8sclusterreceiver/mock_resources_test.go +++ b/receiver/k8sclusterreceiver/mock_resources_test.go @@ -22,7 +22,7 @@ import ( func createPods(t *testing.T, client *fake.Clientset, numPods int) []*corev1.Pod { out := make([]*corev1.Pod, 0, numPods) - for i := 0; i < numPods; i++ { + for i := 
range numPods { p := &corev1.Pod{ ObjectMeta: v1.ObjectMeta{ UID: types.UID("pod" + strconv.Itoa(i)), @@ -40,7 +40,7 @@ func createPods(t *testing.T, client *fake.Clientset, numPods int) []*corev1.Pod } func deletePods(t *testing.T, client *fake.Clientset, numPods int) { - for i := 0; i < numPods; i++ { + for i := range numPods { err := client.CoreV1().Pods("test").Delete(context.Background(), strconv.Itoa(i), v1.DeleteOptions{}) require.NoError(t, err, "error creating node") } @@ -49,7 +49,7 @@ func deletePods(t *testing.T, client *fake.Clientset, numPods int) { } func createNodes(t *testing.T, client *fake.Clientset, numNodes int) { - for i := 0; i < numNodes; i++ { + for i := range numNodes { n := &corev1.Node{ ObjectMeta: v1.ObjectMeta{ UID: types.UID("node" + strconv.Itoa(i)), @@ -64,7 +64,7 @@ func createNodes(t *testing.T, client *fake.Clientset, numNodes int) { } func createClusterQuota(t *testing.T, client *fakeQuota.Clientset, numQuotas int) { - for i := 0; i < numQuotas; i++ { + for i := range numQuotas { q := &quotav1.ClusterResourceQuota{ ObjectMeta: v1.ObjectMeta{ Name: fmt.Sprintf("test-clusterquota-%d", i), diff --git a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go index 300dc0813c39c..db23aa8d16477 100644 --- a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go +++ b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go @@ -60,7 +60,7 @@ func TestUnstructuredListToLogData(t *testing.T) { objects := unstructured.UnstructuredList{ Items: []unstructured.Unstructured{}, } - for i := 0; i < 3; i++ { + for i := range 3 { object := unstructured.Unstructured{} object.SetKind("Node") object.SetName(fmt.Sprintf("node-%d", i)) diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index 5d8a3ce572974..e07708d51f699 100644 --- a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -129,7 +129,7 @@ func TestBrokerScraper_scrape(t *testing.T) { require.Equal(t, testClusterAlias, val.Str()) } ms := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() - for i := 0; i < ms.Len(); i++ { + for i := range ms.Len() { m := ms.At(i) switch m.Name() { case "kafka.brokers": diff --git a/receiver/kafkametricsreceiver/topic_scraper_test.go b/receiver/kafkametricsreceiver/topic_scraper_test.go index 27f54e07dc108..b46a79286c1a9 100644 --- a/receiver/kafkametricsreceiver/topic_scraper_test.go +++ b/receiver/kafkametricsreceiver/topic_scraper_test.go @@ -120,7 +120,7 @@ func TestTopicScraper_scrapes(t *testing.T) { require.Equal(t, testClusterAlias, val.Str()) } ms := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() - for i := 0; i < ms.Len(); i++ { + for i := range ms.Len() { m := ms.At(i) switch m.Name() { case "kafka.topic.partitions": diff --git a/receiver/kafkareceiver/header_extraction.go b/receiver/kafkareceiver/header_extraction.go index efae723c2011f..027e03b249b3e 100644 --- a/receiver/kafkareceiver/header_extraction.go +++ b/receiver/kafkareceiver/header_extraction.go @@ -33,7 +33,7 @@ func (he *headerExtractor) extractHeadersTraces(traces ptrace.Traces, message *s he.logger.Debug("Header key not found in the trace: ", zap.String("key", header)) continue } - for i := 0; i < traces.ResourceSpans().Len(); i++ { + for i := range traces.ResourceSpans().Len() { rs := traces.ResourceSpans().At(i) rs.Resource().Attributes().PutStr(getAttribute(header), value) } @@ -47,7 
+47,7 @@ func (he *headerExtractor) extractHeadersLogs(logs plog.Logs, message *sarama.Co he.logger.Debug("Header key not found in the log: ", zap.String("key", header)) continue } - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rl := logs.ResourceLogs().At(i) rl.Resource().Attributes().PutStr(getAttribute(header), value) } @@ -61,7 +61,7 @@ func (he *headerExtractor) extractHeadersMetrics(metrics pmetric.Metrics, messag he.logger.Debug("Header key not found in the metric: ", zap.String("key", header)) continue } - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) rm.Resource().Attributes().PutStr(getAttribute(header), value) } diff --git a/receiver/kafkareceiver/header_extraction_test.go b/receiver/kafkareceiver/header_extraction_test.go index 76f72b8595ce3..6f0097da26961 100644 --- a/receiver/kafkareceiver/header_extraction_test.go +++ b/receiver/kafkareceiver/header_extraction_test.go @@ -56,7 +56,7 @@ func TestHeaderExtractionTraces(t *testing.T) { go func() { err = c.ConsumeClaim(testSession, groupClaim) for _, trace := range nextConsumer.AllTraces() { - for i := 0; i < trace.ResourceSpans().Len(); i++ { + for i := range trace.ResourceSpans().Len() { rs := trace.ResourceSpans().At(i) validateHeader(t, rs.Resource(), "kafka.header.headerKey1", "headerValue1") validateHeader(t, rs.Resource(), "kafka.header.headerKey2", "headerValue2") @@ -122,7 +122,7 @@ func TestHeaderExtractionLogs(t *testing.T) { go func() { err = c.ConsumeClaim(testSession, groupClaim) for _, logs := range nextConsumer.AllLogs() { - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for i := range logs.ResourceLogs().Len() { rs := logs.ResourceLogs().At(i) validateHeader(t, rs.Resource(), "kafka.header.headerKey1", "headerValueLog1") validateHeader(t, rs.Resource(), "kafka.header.headerKey2", "headerValueLog2") @@ -181,7 +181,7 @@ func TestHeaderExtractionMetrics(t *testing.T) { go func() { err = c.ConsumeClaim(testSession, groupClaim) for _, metric := range nextConsumer.AllMetrics() { - for i := 0; i < metric.ResourceMetrics().Len(); i++ { + for i := range metric.ResourceMetrics().Len() { rs := metric.ResourceMetrics().At(i) validateHeader(t, rs.Resource(), "kafka.header.headerKey1", "headerValueMetric1") validateHeader(t, rs.Resource(), "kafka.header.headerKey2", "headerValueMetric2") diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go index 659aa0e420063..15f9fdacc6f30 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go @@ -212,7 +212,7 @@ func TestMetadataErrorCases(t *testing.T) { assert.Len(t, acc.m, tt.numMDs) require.Equal(t, tt.numLogs, logs.Len()) - for i := 0; i < tt.numLogs; i++ { + for i := range tt.numLogs { assert.Equal(t, tt.logMessages[i], logs.All()[i].Message) } }) diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go index 94fef59f09463..f45d174589db1 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go @@ -49,13 +49,13 @@ func TestMetricAccumulator(t *testing.T) { func requireMetricsOk(t *testing.T, mds []pmetric.Metrics) { for _, md := range mds { - for i := 0; i < 
md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { rm := md.ResourceMetrics().At(i) requireResourceOk(t, rm.Resource()) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver", ilm.Scope().Name()) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { requireMetricOk(t, ilm.Metrics().At(k)) } } @@ -69,7 +69,7 @@ func requireMetricOk(t *testing.T, m pmetric.Metric) { switch m.Type() { case pmetric.MetricTypeGauge: gauge := m.Gauge() - for i := 0; i < gauge.DataPoints().Len(); i++ { + for i := range gauge.DataPoints().Len() { dp := gauge.DataPoints().At(i) require.NotZero(t, dp.Timestamp()) requirePointOk(t, dp) @@ -78,7 +78,7 @@ func requireMetricOk(t *testing.T, m pmetric.Metric) { sum := m.Sum() require.True(t, sum.IsMonotonic()) require.Equal(t, pmetric.AggregationTemporalityCumulative, sum.AggregationTemporality()) - for i := 0; i < sum.DataPoints().Len(); i++ { + for i := range sum.DataPoints().Len() { dp := sum.DataPoints().At(i) // Start time is required for cumulative metrics. Make assertions // around start time only when dealing with one or when it is set. @@ -172,7 +172,7 @@ func TestEmitMetrics(t *testing.T) { for _, name := range metricNames { requireContains(t, metrics, name) metric := metrics[name][0] - for i := 0; i < metric.Sum().DataPoints().Len(); i++ { + for i := range metric.Sum().DataPoints().Len() { dp := metric.Sum().DataPoints().At(i) _, found := dp.Attributes().Get("direction") require.True(t, found, "expected direction attribute") @@ -188,11 +188,11 @@ func requireContains(t *testing.T, metrics map[string][]pmetric.Metric, metricNa func indexedFakeMetrics(mds []pmetric.Metrics) map[string][]pmetric.Metric { metrics := make(map[string][]pmetric.Metric) for _, md := range mds { - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := range md.ResourceMetrics().Len() { rm := md.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { m := ilm.Metrics().At(k) metricName := m.Name() list := metrics[metricName] diff --git a/receiver/libhoneyreceiver/internal/parser/parser.go b/receiver/libhoneyreceiver/internal/parser/parser.go index 8488f74faf6d8..b3c1d26ebca49 100644 --- a/receiver/libhoneyreceiver/internal/parser/parser.go +++ b/receiver/libhoneyreceiver/internal/parser/parser.go @@ -91,7 +91,7 @@ func ToPdata(dataset string, lhes []libhoneyevent.LibhoneyEvent, cfg libhoneyeve start := time.Now() for _, ss := range foundScopes.Scope { - for i := 0; i < ss.ScopeSpans.Len(); i++ { + for i := range ss.ScopeSpans.Len() { sp := ss.ScopeSpans.At(i) spID := trc.SpanID(sp.SpanID()) diff --git a/receiver/mongodbatlasreceiver/alerts_test.go b/receiver/mongodbatlasreceiver/alerts_test.go index 2e5a6884cc010..3e4f9eaa2cc5a 100644 --- a/receiver/mongodbatlasreceiver/alerts_test.go +++ b/receiver/mongodbatlasreceiver/alerts_test.go @@ -675,11 +675,11 @@ func testAlert() mongodbatlas.Alert { } func validateAttributes(t *testing.T, expectedStringAttributes map[string]string, logs plog.Logs) { - for i := 0; i < logs.ResourceLogs().Len(); i++ { + for range logs.ResourceLogs().Len() { rl := logs.ResourceLogs().At(0) - for j := 0; j < rl.ScopeLogs().Len(); j++ { + for j := range 
rl.ScopeLogs().Len() { sl := rl.ScopeLogs().At(j) - for k := 0; k < sl.LogRecords().Len(); k++ { + for k := range sl.LogRecords().Len() { lr := sl.LogRecords().At(k) for k, v := range expectedStringAttributes { val, ok := lr.Attributes().Get(k) diff --git a/receiver/mysqlreceiver/scraper.go b/receiver/mysqlreceiver/scraper.go index b36652512376e..ef9a23187a258 100644 --- a/receiver/mysqlreceiver/scraper.go +++ b/receiver/mysqlreceiver/scraper.go @@ -427,7 +427,7 @@ func (m *mySQLScraper) scrapeTableStats(now pcommon.Timestamp, errs *scrapererro return } - for i := 0; i < len(tableStats); i++ { + for i := range tableStats { s := tableStats[i] // counts m.mb.RecordMysqlTableRowsDataPoint(now, s.rows, s.name, s.schema) @@ -445,7 +445,7 @@ func (m *mySQLScraper) scrapeTableIoWaitsStats(now pcommon.Timestamp, errs *scra return } - for i := 0; i < len(tableIoWaitsStats); i++ { + for i := range tableIoWaitsStats { s := tableIoWaitsStats[i] // counts m.mb.RecordMysqlTableIoWaitCountDataPoint(now, s.countDelete, metadata.AttributeIoWaitsOperationsDelete, s.name, s.schema) @@ -477,7 +477,7 @@ func (m *mySQLScraper) scrapeIndexIoWaitsStats(now pcommon.Timestamp, errs *scra return } - for i := 0; i < len(indexIoWaitsStats); i++ { + for i := range indexIoWaitsStats { s := indexIoWaitsStats[i] // counts m.mb.RecordMysqlIndexIoWaitCountDataPoint(now, s.countDelete, metadata.AttributeIoWaitsOperationsDelete, s.name, s.schema, s.index) @@ -509,7 +509,7 @@ func (m *mySQLScraper) scrapeStatementEventsStats(now pcommon.Timestamp, errs *s return } - for i := 0; i < len(statementEventsStats); i++ { + for i := range statementEventsStats { s := statementEventsStats[i] m.mb.RecordMysqlStatementEventCountDataPoint(now, s.countCreatedTmpDiskTables, s.schema, s.digest, s.digestText, metadata.AttributeEventStateCreatedTmpDiskTables) m.mb.RecordMysqlStatementEventCountDataPoint(now, s.countCreatedTmpTables, s.schema, s.digest, s.digestText, metadata.AttributeEventStateCreatedTmpTables) @@ -534,7 +534,7 @@ func (m *mySQLScraper) scrapeTableLockWaitEventStats(now pcommon.Timestamp, errs return } - for i := 0; i < len(tableLockWaitEventStats); i++ { + for i := range tableLockWaitEventStats { s := tableLockWaitEventStats[i] // read data points m.mb.RecordMysqlTableLockWaitReadCountDataPoint(now, s.countReadNormal, s.schema, s.name, metadata.AttributeReadLockTypeNormal) @@ -573,7 +573,7 @@ func (m *mySQLScraper) scrapeReplicaStatusStats(now pcommon.Timestamp) { return } - for i := 0; i < len(replicaStatusStats); i++ { + for i := range replicaStatusStats { s := replicaStatusStats[i] val, _ := s.secondsBehindSource.Value() diff --git a/receiver/namedpipereceiver/namedpipe_test.go b/receiver/namedpipereceiver/namedpipe_test.go index 433492ab0f4a0..5d865d90dd98d 100644 --- a/receiver/namedpipereceiver/namedpipe_test.go +++ b/receiver/namedpipereceiver/namedpipe_test.go @@ -67,7 +67,7 @@ func TestReadPipe(t *testing.T) { // Write 10 logs into the pipe and assert that they all come out the other end. 
numLogs := 10 - for i := 0; i < numLogs; i++ { + for range numLogs { _, err = pipe.WriteString("test\n") require.NoError(t, err, "failed to write to pipe") } diff --git a/receiver/opencensusreceiver/internal/ocmetrics/opencensus_test.go b/receiver/opencensusreceiver/internal/ocmetrics/opencensus_test.go index 6243c8d35e28a..fa11f42f4b17b 100644 --- a/receiver/opencensusreceiver/internal/ocmetrics/opencensus_test.go +++ b/receiver/opencensusreceiver/internal/ocmetrics/opencensus_test.go @@ -144,7 +144,7 @@ func TestExportMultiplexing(t *testing.T) { resultsMapping := make(map[string][]*metricspb.Metric) for _, md := range metricSink.AllMetrics() { rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { node, _, metrics := opencensus.ResourceMetricsToOC(rms.At(i)) resultsMapping[nodeToKey(node)] = append(resultsMapping[nodeToKey(node)], metrics...) } @@ -230,7 +230,7 @@ func TestExportProtocolViolations_nodelessFirstMessage(t *testing.T) { // Now the response should return an error and should have been torn down // regardless of the number of times after invocation below, or any attempt // to send the proper/corrective data should be rejected. - for i := 0; i < 10; i++ { + for i := range 10 { recv, err := metricsClient.Recv() if recv != nil { t.Errorf("Iteration #%d: Unexpectedly got back a response: %#v", i, recv) @@ -297,7 +297,7 @@ func TestExportProtocolConformation_metricsInFirstMessage(t *testing.T) { resultsMapping := make(map[string][]*metricspb.Metric) for _, md := range metricSink.AllMetrics() { rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { + for i := range rms.Len() { node, _, metrics := opencensus.ResourceMetricsToOC(rms.At(i)) resultsMapping[nodeToKey(node)] = append(resultsMapping[nodeToKey(node)], metrics...) 
} diff --git a/receiver/opencensusreceiver/internal/octrace/observability_test.go b/receiver/opencensusreceiver/internal/octrace/observability_test.go index 6ea9ca8ce0a0a..2d98c6587ab21 100644 --- a/receiver/opencensusreceiver/internal/octrace/observability_test.go +++ b/receiver/opencensusreceiver/internal/octrace/observability_test.go @@ -53,7 +53,7 @@ func TestEnsureRecordedMetrics(t *testing.T) { traceSvcClient, traceSvcDoneFn, err := makeTraceServiceClient(addr) require.NoError(t, err, "Failed to create the trace service client: %v", err) spans := []*tracepb.Span{{TraceId: []byte("abcdefghijklmnop"), SpanId: []byte("12345678")}} - for i := 0; i < n; i++ { + for range n { err = traceSvcClient.Send(&agenttracepb.ExportTraceServiceRequest{Spans: spans, Node: &commonpb.Node{}}) require.NoError(t, err, "Failed to send requests to the service: %v", err) } @@ -102,7 +102,7 @@ func TestExportSpanLinkingMaintainsParentLink(t *testing.T) { require.NoError(t, err, "Failed to create the trace service client: %v", err) n := 5 - for i := 0; i < n; i++ { + for i := range n { sl := []*tracepb.Span{{TraceId: []byte("abcdefghijklmnop"), SpanId: []byte{byte(i + 1), 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}}} err = traceSvcClient.Send(&agenttracepb.ExportTraceServiceRequest{Spans: sl, Node: &commonpb.Node{}}) require.NoError(t, err, "Failed to send requests to the service: %v", err) diff --git a/receiver/opencensusreceiver/internal/octrace/opencensus_test.go b/receiver/opencensusreceiver/internal/octrace/opencensus_test.go index 0f58c50645201..031dc5831779b 100644 --- a/receiver/opencensusreceiver/internal/octrace/opencensus_test.go +++ b/receiver/opencensusreceiver/internal/octrace/opencensus_test.go @@ -140,7 +140,7 @@ func TestExportMultiplexing(t *testing.T) { resultsMapping := make(map[string][]*tracepb.Span) for _, td := range spanSink.AllTraces() { rss := td.ResourceSpans() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { node, _, spans := opencensus.ResourceSpansToOC(rss.At(i)) resultsMapping[nodeToKey(node)] = append(resultsMapping[nodeToKey(node)], spans...) } @@ -241,7 +241,7 @@ func TestExportProtocolViolations_nodelessFirstMessage(t *testing.T) { // Now the response should return an error and should have been torn down // regardless of the number of times after invocation below, or any attempt // to send the proper/corrective data should be rejected. - for i := 0; i < 10; i++ { + for i := range 10 { recv, err := traceClient.Recv() if recv != nil { t.Errorf("Iteration #%d: Unexpectedly got back a response: %#v", i, recv) @@ -310,7 +310,7 @@ func TestExportProtocolConformation_spansInFirstMessage(t *testing.T) { resultsMapping := make(map[string][]*tracepb.Span) for _, td := range spanSink.AllTraces() { rss := td.ResourceSpans() - for i := 0; i < rss.Len(); i++ { + for i := range rss.Len() { node, _, spans := opencensus.ResourceSpansToOC(rss.At(i)) resultsMapping[nodeToKey(node)] = append(resultsMapping[nodeToKey(node)], spans...) 
} diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index 245ce9fd8c2c1..dbe39d70c5c77 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -156,7 +156,7 @@ func TestScraper_Scrape(t *testing.T) { assert.True(t, ok) assert.Equal(t, "", name.Str()) var found pmetric.Metric - for i := 0; i < m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len(); i++ { + for i := range m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len() { metric := m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(i) if metric.Name() == "oracledb.consistent_gets" { found = metric diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go index 33a8ee62768fc..c50c6773149c4 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go @@ -822,7 +822,7 @@ func TestReceiverEOF(t *testing.T) { ctc.start(ctc.newRealConsumer, defaultBQ()) go func() { - for i := 0; i < times; i++ { + for range times { td := testdata.GenerateTraces(2) expectData = append(expectData, td) @@ -844,13 +844,13 @@ func TestReceiverEOF(t *testing.T) { wg.Done() }() - for i := 0; i < times; i++ { + for range times { actualData = append(actualData, (<-ctc.consume).Data.(ptrace.Traces)) } assert.Equal(t, len(expectData), len(actualData)) - for i := 0; i < len(expectData); i++ { + for i := range expectData { otelAssert.Equiv(stdTesting, []json.Marshaler{ compareJSONTraces{expectData[i]}, }, []json.Marshaler{ @@ -980,7 +980,7 @@ func TestHeaderReceiverStreamContextOnly(t *testing.T) { h := newHeaderReceiver(ctx, nil, true) - for i := 0; i < 3; i++ { + for range 3 { cc, _, err := h.combineHeaders(ctx, nil) require.NoError(t, err) @@ -998,7 +998,7 @@ func TestHeaderReceiverNoIncludeMetadata(t *testing.T) { h := newHeaderReceiver(ctx, nil, false) - for i := 0; i < 3; i++ { + for range 3 { cc, _, err := h.combineHeaders(ctx, nil) require.NoError(t, err) @@ -1022,7 +1022,7 @@ func TestHeaderReceiverAuthServerNoIncludeMetadata(t *testing.T) { h := newHeaderReceiver(ctx, as, false) - for i := 0; i < 3; i++ { + for range 3 { cc, hdrs, err := h.combineHeaders(ctx, nil) // The incoming metadata keys are not in the context. 
@@ -1052,7 +1052,7 @@ func TestHeaderReceiverRequestNoStreamMetadata(t *testing.T) { h := newHeaderReceiver(ctx, nil, true) - for i := 0; i < 3; i++ { + for range 3 { hpb.Reset() for key, vals := range expect { @@ -1092,7 +1092,7 @@ func TestHeaderReceiverAuthServerIsSetNoIncludeMetadata(t *testing.T) { h := newHeaderReceiver(ctx, as, true) - for i := 0; i < 3; i++ { + for range 3 { hpb.Reset() for key, vals := range expect { @@ -1151,7 +1151,7 @@ func TestHeaderReceiverBothMetadata(t *testing.T) { h := newHeaderReceiver(ctx, nil, true) - for i := 0; i < 3; i++ { + for range 3 { hpb.Reset() for key, vals := range expectL { @@ -1197,7 +1197,7 @@ func TestHeaderReceiverDuplicateMetadata(t *testing.T) { h := newHeaderReceiver(ctx, nil, true) - for i := 0; i < 3; i++ { + for range 3 { hpb.Reset() for key, vals := range expectRequest { @@ -1385,7 +1385,7 @@ func TestHeaderReceiverIsTraced(t *testing.T) { h := newHeaderReceiver(ctx, nil, true) - for i := 0; i < 3; i++ { + for range 3 { hpb.Reset() for key, vals := range requestHeaders { diff --git a/receiver/otelarrowreceiver/internal/logs/otlp_test.go b/receiver/otelarrowreceiver/internal/logs/otlp_test.go index d8c44d5905427..91cb10c55f391 100644 --- a/receiver/otelarrowreceiver/internal/logs/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/logs/otlp_test.go @@ -140,7 +140,7 @@ func TestExport_AdmissionLimitExceeded(t *testing.T) { var expectSuccess atomic.Int32 - for i := 0; i < 10; i++ { + for range 10 { go func() { defer wait.Done() _, err := logsClient.Export(context.Background(), req) diff --git a/receiver/otelarrowreceiver/internal/metrics/otlp_test.go b/receiver/otelarrowreceiver/internal/metrics/otlp_test.go index b0059a0596854..84412d70822d2 100644 --- a/receiver/otelarrowreceiver/internal/metrics/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/metrics/otlp_test.go @@ -140,7 +140,7 @@ func TestExport_AdmissionLimitExceeded(t *testing.T) { var expectSuccess atomic.Int32 - for i := 0; i < 10; i++ { + for range 10 { go func() { defer wait.Done() _, err := metricsClient.Export(context.Background(), req) diff --git a/receiver/otelarrowreceiver/internal/trace/otlp_test.go b/receiver/otelarrowreceiver/internal/trace/otlp_test.go index 49c3ffa4c2b28..e4a21b4dc614b 100644 --- a/receiver/otelarrowreceiver/internal/trace/otlp_test.go +++ b/receiver/otelarrowreceiver/internal/trace/otlp_test.go @@ -140,7 +140,7 @@ func TestExport_AdmissionLimitExceeded(t *testing.T) { var expectSuccess atomic.Int32 - for i := 0; i < 10; i++ { + for range 10 { go func() { defer wait.Done() _, err := traceClient.Export(context.Background(), req) diff --git a/receiver/otelarrowreceiver/otelarrow_test.go b/receiver/otelarrowreceiver/otelarrow_test.go index 7f3521b29b19d..88a944db717fe 100644 --- a/receiver/otelarrowreceiver/otelarrow_test.go +++ b/receiver/otelarrowreceiver/otelarrow_test.go @@ -610,7 +610,7 @@ func TestGRPCArrowReceiver(t *testing.T) { // Repeatedly send traces via arrow. Set the expected traces // metadata to receive. - for i := 0; i < 10; i++ { + for i := range 10 { td := testdata.GenerateTraces(2) expectTraces = append(expectTraces, td) @@ -731,7 +731,7 @@ func TestGRPCArrowReceiverAuth(t *testing.T) { producer := arrowRecord.NewProducer() // Repeatedly send traces via arrow. Expect an auth error. 
- for i := 0; i < 10; i++ { + for range 10 { td := testdata.GenerateTraces(2) batch, err := producer.BatchArrowRecordsFromTraces(td) @@ -782,7 +782,7 @@ func TestConcurrentArrowReceiver(t *testing.T) { var wg sync.WaitGroup wg.Add(numStreams) - for j := 0; j < numStreams; j++ { + for range numStreams { go func() { defer wg.Done() @@ -796,7 +796,7 @@ func TestConcurrentArrowReceiver(t *testing.T) { // Repeatedly send traces via arrow. Set the expected traces // metadata to receive. - for i := 0; i < itemsPerStream; i++ { + for i := range itemsPerStream { td := testdata.GenerateTraces(2) headerBuf.Reset() @@ -839,7 +839,7 @@ func TestConcurrentArrowReceiver(t *testing.T) { counts[val]++ } - for i := 0; i < itemsPerStream; i++ { + for i := range itemsPerStream { require.Equal(t, numStreams, counts[i]) } } diff --git a/receiver/otlpjsonfilereceiver/file.go b/receiver/otlpjsonfilereceiver/file.go index ed4c8d3bc4fd6..9a2134c51ca2d 100644 --- a/receiver/otlpjsonfilereceiver/file.go +++ b/receiver/otlpjsonfilereceiver/file.go @@ -89,11 +89,11 @@ func createLogsReceiver(_ context.Context, settings receiver.Settings, configura var l plog.Logs l, err = logsUnmarshaler.UnmarshalLogs(token.Body) // Appends token.Attributes - for i := 0; i < l.ResourceLogs().Len(); i++ { + for i := range l.ResourceLogs().Len() { resourceLog := l.ResourceLogs().At(i) - for j := 0; j < resourceLog.ScopeLogs().Len(); j++ { + for j := range resourceLog.ScopeLogs().Len() { scopeLog := resourceLog.ScopeLogs().At(j) - for k := 0; k < scopeLog.LogRecords().Len(); k++ { + for k := range scopeLog.LogRecords().Len() { LogRecords := scopeLog.LogRecords().At(k) appendToMap(token, LogRecords.Attributes()) } @@ -137,11 +137,11 @@ func createMetricsReceiver(_ context.Context, settings receiver.Settings, config var m pmetric.Metrics m, err = metricsUnmarshaler.UnmarshalMetrics(token.Body) // Appends token.Attributes - for i := 0; i < m.ResourceMetrics().Len(); i++ { + for i := range m.ResourceMetrics().Len() { resourceMetric := m.ResourceMetrics().At(i) - for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ { + for j := range resourceMetric.ScopeMetrics().Len() { ScopeMetric := resourceMetric.ScopeMetrics().At(j) - for k := 0; k < ScopeMetric.Metrics().Len(); k++ { + for k := range ScopeMetric.Metrics().Len() { metric := ScopeMetric.Metrics().At(k) appendToMap(token, metric.Metadata()) } @@ -184,11 +184,11 @@ func createTracesReceiver(_ context.Context, settings receiver.Settings, configu var t ptrace.Traces t, err = tracesUnmarshaler.UnmarshalTraces(token.Body) // Appends token.Attributes - for i := 0; i < t.ResourceSpans().Len(); i++ { + for i := range t.ResourceSpans().Len() { resourceSpan := t.ResourceSpans().At(i) - for j := 0; j < resourceSpan.ScopeSpans().Len(); j++ { + for j := range resourceSpan.ScopeSpans().Len() { scopeSpan := resourceSpan.ScopeSpans().At(j) - for k := 0; k < scopeSpan.Spans().Len(); k++ { + for k := range scopeSpan.Spans().Len() { spans := scopeSpan.Spans().At(k) appendToMap(token, spans.Attributes()) } diff --git a/receiver/podmanreceiver/record_metrics_test.go b/receiver/podmanreceiver/record_metrics_test.go index ac949613a4883..7e35380d00816 100644 --- a/receiver/podmanreceiver/record_metrics_test.go +++ b/receiver/podmanreceiver/record_metrics_test.go @@ -40,7 +40,7 @@ func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pme metrics := rsm.ScopeMetrics().At(0).Metrics() assert.Equal(t, 11, metrics.Len()) - for i := 0; i < metrics.Len(); i++ { + for i := range metrics.Len() { 
m := metrics.At(i) switch m.Name() { case "container.memory.usage.limit": diff --git a/receiver/prometheusreceiver/internal/metricfamily.go b/receiver/prometheusreceiver/internal/metricfamily.go index 1dc1b1432d9e9..cef803b717ad0 100644 --- a/receiver/prometheusreceiver/internal/metricfamily.go +++ b/receiver/prometheusreceiver/internal/metricfamily.go @@ -103,7 +103,7 @@ func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) var adjustedCount float64 pointIsStale := value.IsStaleNaN(mg.sum) || value.IsStaleNaN(mg.count) - for i := 0; i < bucketCount-1; i++ { + for i := range bucketCount - 1 { bounds[i] = mg.complexValue[i].boundary adjustedCount = mg.complexValue[i].value @@ -238,11 +238,11 @@ func convertDeltaBuckets(spans []histogram.Span, deltas []int64, buckets pcommon bucketCount := int64(0) for spanIdx, span := range spans { if spanIdx > 0 { - for i := int32(0); i < span.Offset; i++ { + for range span.Offset { buckets.Append(uint64(0)) } } - for i := uint32(0); i < span.Length; i++ { + for range span.Length { bucketCount += deltas[bucketIdx] bucketIdx++ buckets.Append(uint64(bucketCount)) @@ -255,11 +255,11 @@ func convertAbsoluteBuckets(spans []histogram.Span, counts []float64, buckets pc bucketIdx := 0 for spanIdx, span := range spans { if spanIdx > 0 { - for i := int32(0); i < span.Offset; i++ { + for range span.Offset { buckets.Append(uint64(0)) } } - for i := uint32(0); i < span.Length; i++ { + for range span.Length { buckets.Append(uint64(counts[bucketIdx])) bucketIdx++ } diff --git a/receiver/prometheusreceiver/internal/metrics_adjuster.go b/receiver/prometheusreceiver/internal/metrics_adjuster.go index 70dd6b6a411f0..60867b08d33c8 100644 --- a/receiver/prometheusreceiver/internal/metrics_adjuster.go +++ b/receiver/prometheusreceiver/internal/metrics_adjuster.go @@ -260,7 +260,7 @@ func NewInitialPointAdjuster(logger *zap.Logger, gcInterval time.Duration, useCr // AdjustMetrics takes a sequence of metrics and adjust their start times based on the initial and // previous points in the timeseriesMap. func (a *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) _, found := rm.Resource().Attributes().Get(semconv.AttributeServiceName) if !found { @@ -273,7 +273,7 @@ func (a *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { } } - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) job, _ := rm.Resource().Attributes().Get(semconv.AttributeServiceName) instance, _ := rm.Resource().Attributes().Get(semconv.AttributeServiceInstanceID) @@ -282,9 +282,9 @@ func (a *initialPointAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { // The lock on the relevant timeseriesMap is held throughout the adjustment process to ensure that // nothing else can modify the data used for adjustment. 
tsm.Lock() - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { metric := ilm.Metrics().At(k) switch dataType := metric.Type(); dataType { case pmetric.MetricTypeGauge: @@ -324,7 +324,7 @@ func (a *initialPointAdjuster) adjustMetricHistogram(tsm *timeseriesMap, current } currentPoints := histogram.DataPoints() - for i := 0; i < currentPoints.Len(); i++ { + for i := range currentPoints.Len() { currentDist := currentPoints.At(i) // start timestamp was set from _created @@ -376,7 +376,7 @@ func (a *initialPointAdjuster) adjustMetricExponentialHistogram(tsm *timeseriesM } currentPoints := histogram.DataPoints() - for i := 0; i < currentPoints.Len(); i++ { + for i := range currentPoints.Len() { currentDist := currentPoints.At(i) // start timestamp was set from _created @@ -422,7 +422,7 @@ func (a *initialPointAdjuster) adjustMetricExponentialHistogram(tsm *timeseriesM func (a *initialPointAdjuster) adjustMetricSum(tsm *timeseriesMap, current pmetric.Metric) { currentPoints := current.Sum().DataPoints() - for i := 0; i < currentPoints.Len(); i++ { + for i := range currentPoints.Len() { currentSum := currentPoints.At(i) // start timestamp was set from _created @@ -466,7 +466,7 @@ func (a *initialPointAdjuster) adjustMetricSum(tsm *timeseriesMap, current pmetr func (a *initialPointAdjuster) adjustMetricSummary(tsm *timeseriesMap, current pmetric.Metric) { currentPoints := current.Summary().DataPoints() - for i := 0; i < currentPoints.Len(); i++ { + for i := range currentPoints.Len() { currentSummary := currentPoints.At(i) // start timestamp was set from _created diff --git a/receiver/prometheusreceiver/internal/metrics_adjuster_test.go b/receiver/prometheusreceiver/internal/metrics_adjuster_test.go index 1f77792695564..07b82b16a1143 100644 --- a/receiver/prometheusreceiver/internal/metrics_adjuster_test.go +++ b/receiver/prometheusreceiver/internal/metrics_adjuster_test.go @@ -743,7 +743,7 @@ func runScript(t *testing.T, ma MetricsAdjuster, job, instance string, tests []* adjusted := pmetric.NewMetrics() test.metrics.CopyTo(adjusted) // Add the instance/job to the input metrics if they aren't already present. - for i := 0; i < adjusted.ResourceMetrics().Len(); i++ { + for i := range adjusted.ResourceMetrics().Len() { rm := adjusted.ResourceMetrics().At(i) _, found := rm.Resource().Attributes().Get(semconv.AttributeServiceName) if !found { @@ -757,7 +757,7 @@ func runScript(t *testing.T, ma MetricsAdjuster, job, instance string, tests []* assert.NoError(t, ma.AdjustMetrics(adjusted)) // Add the instance/job to the expected metrics as well if they aren't already present. 
- for i := 0; i < test.adjusted.ResourceMetrics().Len(); i++ { + for i := range test.adjusted.ResourceMetrics().Len() { rm := test.adjusted.ResourceMetrics().At(i) _, found := rm.Resource().Attributes().Get(semconv.AttributeServiceName) if !found { @@ -787,7 +787,7 @@ func BenchmarkGetAttributesSignature(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { getAttributesSignature(attrs) } } diff --git a/receiver/prometheusreceiver/internal/metricsutil_test.go b/receiver/prometheusreceiver/internal/metricsutil_test.go index bc1dabbd696dd..5b2835aa7e276 100644 --- a/receiver/prometheusreceiver/internal/metricsutil_test.go +++ b/receiver/prometheusreceiver/internal/metricsutil_test.go @@ -184,7 +184,7 @@ func exponentialHistogramPointSimplified(attributes []*kv, startTimestamp, times var sum float64 var count uint64 - for i := 0; i < bucketCount; i++ { + for i := range bucketCount { positive.BucketCounts().Append(uint64(i + 1)) negative.BucketCounts().Append(uint64(i + 1)) count += uint64(i+1) + uint64(i+1) @@ -268,7 +268,7 @@ func summaryPoint(attributes []*kv, startTimestamp, timestamp pcommon.Timestamp, sdp.SetSum(sum) qvL := sdp.QuantileValues() - for i := 0; i < len(quantiles); i++ { + for i := range quantiles { qvi := qvL.AppendEmpty() qvi.SetQuantile(quantiles[i]) qvi.SetValue(values[i]) diff --git a/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go b/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go index b630ce8ac8cec..58e1e2f65187b 100644 --- a/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go +++ b/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go @@ -189,7 +189,7 @@ service: // 5. Let's wait on 10 fetches. var wReqL []*prompb.WriteRequest - for i := 0; i < 10; i++ { + for range 10 { wReqL = append(wReqL, <-prweUploads) } defer cancel() diff --git a/receiver/prometheusreceiver/internal/starttimemetricadjuster.go b/receiver/prometheusreceiver/internal/starttimemetricadjuster.go index dde13880950d9..ee9c557c04b0f 100644 --- a/receiver/prometheusreceiver/internal/starttimemetricadjuster.go +++ b/receiver/prometheusreceiver/internal/starttimemetricadjuster.go @@ -70,11 +70,11 @@ func (stma *startTimeMetricAdjuster) AdjustMetrics(metrics pmetric.Metrics) erro } startTimeTs := timestampFromFloat64(startTime) - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { metric := ilm.Metrics().At(k) switch metric.Type() { case pmetric.MetricTypeGauge: @@ -82,28 +82,28 @@ func (stma *startTimeMetricAdjuster) AdjustMetrics(metrics pmetric.Metrics) erro case pmetric.MetricTypeSum: dataPoints := metric.Sum().DataPoints() - for l := 0; l < dataPoints.Len(); l++ { + for l := range dataPoints.Len() { dp := dataPoints.At(l) dp.SetStartTimestamp(startTimeTs) } case pmetric.MetricTypeSummary: dataPoints := metric.Summary().DataPoints() - for l := 0; l < dataPoints.Len(); l++ { + for l := range dataPoints.Len() { dp := dataPoints.At(l) dp.SetStartTimestamp(startTimeTs) } case pmetric.MetricTypeHistogram: dataPoints := metric.Histogram().DataPoints() - for l := 0; l < dataPoints.Len(); l++ { + for l := range dataPoints.Len() { dp := dataPoints.At(l) dp.SetStartTimestamp(startTimeTs) } case 
pmetric.MetricTypeExponentialHistogram: dataPoints := metric.ExponentialHistogram().DataPoints() - for l := 0; l < dataPoints.Len(); l++ { + for l := range dataPoints.Len() { dp := dataPoints.At(l) dp.SetStartTimestamp(startTimeTs) } @@ -123,11 +123,11 @@ func (stma *startTimeMetricAdjuster) AdjustMetrics(metrics pmetric.Metrics) erro } func (stma *startTimeMetricAdjuster) getStartTime(metrics pmetric.Metrics) (float64, error) { - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { metric := ilm.Metrics().At(k) if stma.matchStartTimeMetric(metric.Name()) { switch metric.Type() { diff --git a/receiver/prometheusreceiver/internal/starttimemetricadjuster_test.go b/receiver/prometheusreceiver/internal/starttimemetricadjuster_test.go index 07b3c8a56524e..31846bf462035 100644 --- a/receiver/prometheusreceiver/internal/starttimemetricadjuster_test.go +++ b/receiver/prometheusreceiver/internal/starttimemetricadjuster_test.go @@ -129,31 +129,31 @@ func TestStartTimeMetricMatch(t *testing.T) { pmetrics.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceInstanceID, "0") pmetrics.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceName, "job") assert.NoError(t, stma.AdjustMetrics(tt.inputs)) - for i := 0; i < tt.inputs.ResourceMetrics().Len(); i++ { + for i := range tt.inputs.ResourceMetrics().Len() { rm := tt.inputs.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { metric := ilm.Metrics().At(k) switch metric.Type() { case pmetric.MetricTypeSum: dps := metric.Sum().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) } case pmetric.MetricTypeSummary: dps := metric.Summary().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) } case pmetric.MetricTypeHistogram: dps := metric.Histogram().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) } case pmetric.MetricTypeExponentialHistogram: dps := metric.ExponentialHistogram().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) } case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge: @@ -233,26 +233,26 @@ func TestStartTimeMetricFallback(t *testing.T) { pmetrics.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceInstanceID, "0") pmetrics.ResourceMetrics().At(0).Resource().Attributes().PutStr(semconv.AttributeServiceName, "job") assert.NoError(t, stma.AdjustMetrics(tt.inputs)) - for i := 0; i < tt.inputs.ResourceMetrics().Len(); i++ { + for i := range tt.inputs.ResourceMetrics().Len() { rm := tt.inputs.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { metric := 
ilm.Metrics().At(k) switch metric.Type() { case pmetric.MetricTypeSum: dps := metric.Sum().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) } case pmetric.MetricTypeSummary: dps := metric.Summary().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) } case pmetric.MetricTypeHistogram: dps := metric.Histogram().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { assert.Equal(t, tt.expectedStartTime, dps.At(l).StartTimestamp()) } } diff --git a/receiver/prometheusreceiver/internal/transaction.go b/receiver/prometheusreceiver/internal/transaction.go index 4d9f4af3795c3..5b4e48c52af91 100644 --- a/receiver/prometheusreceiver/internal/transaction.go +++ b/receiver/prometheusreceiver/internal/transaction.go @@ -369,7 +369,7 @@ func (t *transaction) getMetrics() (pmetric.Metrics, error) { return true } remove := true - for i := 0; i < metrics.ScopeMetrics().Len(); i++ { + for i := range metrics.ScopeMetrics().Len() { if metrics.ScopeMetrics().At(i).Metrics().Len() > 0 { remove = false break diff --git a/receiver/prometheusreceiver/internal/transaction_test.go b/receiver/prometheusreceiver/internal/transaction_test.go index 2c675b31ccbf6..3f5ddc0a33017 100644 --- a/receiver/prometheusreceiver/internal/transaction_test.go +++ b/receiver/prometheusreceiver/internal/transaction_test.go @@ -216,7 +216,7 @@ func testTransactionAppendMultipleResources(t *testing.T, enableNativeHistograms for _, expectedResource := range expectedResources { foundResource := false expectedServiceName, _ := expectedResource.Attributes().Get(conventions.AttributeServiceName) - for i := 0; i < mds[0].ResourceMetrics().Len(); i++ { + for i := range mds[0].ResourceMetrics().Len() { res := mds[0].ResourceMetrics().At(i).Resource() if serviceName, ok := res.Attributes().Get(conventions.AttributeServiceName); ok { if serviceName.AsString() == expectedServiceName.AsString() { @@ -1902,31 +1902,31 @@ type startTimeAdjuster struct { } func (s *startTimeAdjuster) AdjustMetrics(metrics pmetric.Metrics) error { - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + for i := range metrics.ResourceMetrics().Len() { rm := metrics.ResourceMetrics().At(i) - for j := 0; j < rm.ScopeMetrics().Len(); j++ { + for j := range rm.ScopeMetrics().Len() { ilm := rm.ScopeMetrics().At(j) - for k := 0; k < ilm.Metrics().Len(); k++ { + for k := range ilm.Metrics().Len() { metric := ilm.Metrics().At(k) switch metric.Type() { case pmetric.MetricTypeSum: dps := metric.Sum().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { dps.At(l).SetStartTimestamp(s.startTime) } case pmetric.MetricTypeSummary: dps := metric.Summary().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { dps.At(l).SetStartTimestamp(s.startTime) } case pmetric.MetricTypeHistogram: dps := metric.Histogram().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { dps.At(l).SetStartTimestamp(s.startTime) } case pmetric.MetricTypeExponentialHistogram: dps := metric.ExponentialHistogram().DataPoints() - for l := 0; l < dps.Len(); l++ { + for l := range dps.Len() { dps.At(l).SetStartTimestamp(s.startTime) } case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge: @@ -1977,7 +1977,7 @@ func assertEquivalentMetrics(t *testing.T, want, got pmetric.Metrics) { if want.ResourceMetrics().Len() == 0 { return } - for i := 0; i 
diff --git a/receiver/prometheusreceiver/internal/transaction_test.go b/receiver/prometheusreceiver/internal/transaction_test.go
index 2c675b31ccbf6..3f5ddc0a33017 100644
--- a/receiver/prometheusreceiver/internal/transaction_test.go
+++ b/receiver/prometheusreceiver/internal/transaction_test.go
@@ -216,7 +216,7 @@ func testTransactionAppendMultipleResources(t *testing.T, enableNativeHistograms
 	for _, expectedResource := range expectedResources {
 		foundResource := false
 		expectedServiceName, _ := expectedResource.Attributes().Get(conventions.AttributeServiceName)
-		for i := 0; i < mds[0].ResourceMetrics().Len(); i++ {
+		for i := range mds[0].ResourceMetrics().Len() {
 			res := mds[0].ResourceMetrics().At(i).Resource()
 			if serviceName, ok := res.Attributes().Get(conventions.AttributeServiceName); ok {
 				if serviceName.AsString() == expectedServiceName.AsString() {
@@ -1902,31 +1902,31 @@ type startTimeAdjuster struct {
 }
 
 func (s *startTimeAdjuster) AdjustMetrics(metrics pmetric.Metrics) error {
-	for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
+	for i := range metrics.ResourceMetrics().Len() {
 		rm := metrics.ResourceMetrics().At(i)
-		for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+		for j := range rm.ScopeMetrics().Len() {
 			ilm := rm.ScopeMetrics().At(j)
-			for k := 0; k < ilm.Metrics().Len(); k++ {
+			for k := range ilm.Metrics().Len() {
 				metric := ilm.Metrics().At(k)
 				switch metric.Type() {
 				case pmetric.MetricTypeSum:
 					dps := metric.Sum().DataPoints()
-					for l := 0; l < dps.Len(); l++ {
+					for l := range dps.Len() {
 						dps.At(l).SetStartTimestamp(s.startTime)
 					}
 				case pmetric.MetricTypeSummary:
 					dps := metric.Summary().DataPoints()
-					for l := 0; l < dps.Len(); l++ {
+					for l := range dps.Len() {
 						dps.At(l).SetStartTimestamp(s.startTime)
 					}
 				case pmetric.MetricTypeHistogram:
 					dps := metric.Histogram().DataPoints()
-					for l := 0; l < dps.Len(); l++ {
+					for l := range dps.Len() {
 						dps.At(l).SetStartTimestamp(s.startTime)
 					}
 				case pmetric.MetricTypeExponentialHistogram:
 					dps := metric.ExponentialHistogram().DataPoints()
-					for l := 0; l < dps.Len(); l++ {
+					for l := range dps.Len() {
 						dps.At(l).SetStartTimestamp(s.startTime)
 					}
 				case pmetric.MetricTypeEmpty, pmetric.MetricTypeGauge:
@@ -1977,7 +1977,7 @@ func assertEquivalentMetrics(t *testing.T, want, got pmetric.Metrics) {
 	if want.ResourceMetrics().Len() == 0 {
 		return
 	}
-	for i := 0; i < want.ResourceMetrics().Len(); i++ {
+	for i := range want.ResourceMetrics().Len() {
 		wantSm := want.ResourceMetrics().At(i).ScopeMetrics()
 		gotSm := got.ResourceMetrics().At(i).ScopeMetrics()
 		require.Equal(t, wantSm.Len(), gotSm.Len())
@@ -1985,7 +1985,7 @@ func assertEquivalentMetrics(t *testing.T, want, got pmetric.Metrics) {
 			return
 		}
 
-		for j := 0; j < wantSm.Len(); j++ {
+		for j := range wantSm.Len() {
 			wantMs := wantSm.At(j).Metrics()
 			gotMs := gotSm.At(j).Metrics()
 			require.Equal(t, wantMs.Len(), gotMs.Len())
@@ -1993,7 +1993,7 @@
 
 			wmap := map[string]pmetric.Metric{}
 			gmap := map[string]pmetric.Metric{}
-			for k := 0; k < wantMs.Len(); k++ {
+			for k := range wantMs.Len() {
 				wi := wantMs.At(k)
 				wmap[wi.Name()] = wi
 				gi := gotMs.At(k)
diff --git a/receiver/prometheusreceiver/metrics_receiver_helper_test.go b/receiver/prometheusreceiver/metrics_receiver_helper_test.go
index dda1551ed3b6c..c5765584495a5 100644
--- a/receiver/prometheusreceiver/metrics_receiver_helper_test.go
+++ b/receiver/prometheusreceiver/metrics_receiver_helper_test.go
@@ -140,7 +140,7 @@ func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *PromConfig, error)
 	}
 	mp := newMockPrometheus(endpoints)
 	u, _ := url.Parse(mp.srv.URL)
-	for i := 0; i < len(tds); i++ {
+	for i := range tds {
 		job := make(map[string]any)
 		job["job_name"] = tds[i].name
 		job["metrics_path"] = metricPaths[i]
@@ -218,9 +218,9 @@ func verifyNumTotalScrapeResults(t *testing.T, td *testData, resourceMetrics []p
 func getMetrics(rm pmetric.ResourceMetrics) []pmetric.Metric {
 	var metrics []pmetric.Metric
 	ilms := rm.ScopeMetrics()
-	for j := 0; j < ilms.Len(); j++ {
+	for j := range ilms.Len() {
 		metricSlice := ilms.At(j).Metrics()
-		for i := 0; i < metricSlice.Len(); i++ {
+		for i := range metricSlice.Len() {
 			metrics = append(metrics, metricSlice.At(i))
 		}
 	}
@@ -230,7 +230,7 @@ func getMetrics(rm pmetric.ResourceMetrics) []pmetric.Metric {
 func metricsCount(resourceMetric pmetric.ResourceMetrics) int {
 	metricsCount := 0
 	ilms := resourceMetric.ScopeMetrics()
-	for j := 0; j < ilms.Len(); j++ {
+	for j := range ilms.Len() {
 		ilm := ilms.At(j)
 		metricsCount += ilm.Metrics().Len()
 	}
@@ -243,7 +243,7 @@ func getValidScrapes(t *testing.T, rms []pmetric.ResourceMetrics, target *testDa
 	// for metrics retrieved with 'honor_labels: true', there will be a resource metric containing the scrape metrics, based on the scrape job config,
 	// and resources containing only the retrieved metrics, without additional scrape metrics, based on the job/instance label pairs that are detected
 	// during a scrape
-	for i := 0; i < len(rms); i++ {
+	for i := range rms {
 		allMetrics := getMetrics(rms[i])
 		if expectedScrapeMetricCount <= len(allMetrics) && countScrapeMetrics(allMetrics, target.normalizedName) == expectedScrapeMetricCount ||
 			expectedExtraScrapeMetricCount <= len(allMetrics) && countScrapeMetrics(allMetrics, target.normalizedName) == expectedExtraScrapeMetricCount {
@@ -301,31 +301,31 @@ func isFirstFailedScrape(metrics []pmetric.Metric, normalizedNames bool) bool {
 
 		switch m.Type() {
 		case pmetric.MetricTypeGauge:
-			for i := 0; i < m.Gauge().DataPoints().Len(); i++ {
+			for i := range m.Gauge().DataPoints().Len() {
 				if !m.Gauge().DataPoints().At(i).Flags().NoRecordedValue() {
 					return false
 				}
 			}
 		case pmetric.MetricTypeSum:
-			for i := 0; i < m.Sum().DataPoints().Len(); i++ {
+			for i := range m.Sum().DataPoints().Len() {
 				if !m.Sum().DataPoints().At(i).Flags().NoRecordedValue() {
 					return false
 				}
 			}
 		case pmetric.MetricTypeExponentialHistogram:
-			for i := 0; i < m.ExponentialHistogram().DataPoints().Len(); i++ {
+			for i := range m.ExponentialHistogram().DataPoints().Len() {
 				if !m.ExponentialHistogram().DataPoints().At(i).Flags().NoRecordedValue() {
 					return false
 				}
 			}
 		case pmetric.MetricTypeHistogram:
-			for i := 0; i < m.Histogram().DataPoints().Len(); i++ {
+			for i := range m.Histogram().DataPoints().Len() {
 				if !m.Histogram().DataPoints().At(i).Flags().NoRecordedValue() {
 					return false
 				}
 			}
 		case pmetric.MetricTypeSummary:
-			for i := 0; i < m.Summary().DataPoints().Len(); i++ {
+			for i := range m.Summary().DataPoints().Len() {
 				if !m.Summary().DataPoints().At(i).Flags().NoRecordedValue() {
 					return false
 				}
@@ -349,9 +349,9 @@ func assertUp(t *testing.T, expected float64, metrics []pmetric.Metric) {
 func countScrapeMetricsRM(got pmetric.ResourceMetrics, normalizedNames bool) int {
 	n := 0
 	ilms := got.ScopeMetrics()
-	for j := 0; j < ilms.Len(); j++ {
+	for j := range ilms.Len() {
 		ilm := ilms.At(j)
-		for i := 0; i < ilm.Metrics().Len(); i++ {
+		for i := range ilm.Metrics().Len() {
 			if isDefaultMetrics(ilm.Metrics().At(i), normalizedNames) {
 				n++
 			}
@@ -661,7 +661,7 @@ func compareSummary(count uint64, sum float64, quantiles [][]float64) summaryPoi
 	assert.Equal(t, sum, summaryDataPoint.Sum(), "Summary sum value does not match")
 	req := assert.Equal(t, len(quantiles), summaryDataPoint.QuantileValues().Len())
 	if req {
-		for i := 0; i < summaryDataPoint.QuantileValues().Len(); i++ {
+		for i := range summaryDataPoint.QuantileValues().Len() {
 			assert.Equal(t, quantiles[i][0], summaryDataPoint.QuantileValues().At(i).Quantile(),
 				"Summary quantile do not match")
 			if math.IsNaN(quantiles[i][1]) {
@@ -755,7 +755,7 @@ func splitMetricsByTarget(metrics []pmetric.Metrics) map[string][]pmetric.Resour
 	pResults := make(map[string][]pmetric.ResourceMetrics)
 	for _, md := range metrics {
 		rms := md.ResourceMetrics()
-		for i := 0; i < rms.Len(); i++ {
+		for i := range rms.Len() {
 			name, _ := rms.At(i).Resource().Attributes().Get("service.name")
 			pResults[name.AsString()] = append(pResults[name.AsString()], rms.At(i))
 		}
diff --git a/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go b/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go
index ac3c23561df5f..3c7d512d32293 100644
--- a/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go
+++ b/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go
@@ -47,7 +47,7 @@ var totalScrapes = 10
 // TestStaleNaNs validates that staleness marker gets generated when the timeseries is no longer present
 func TestStaleNaNs(t *testing.T) {
 	var mockResponses []mockPrometheusResponse
-	for i := 0; i < totalScrapes; i++ {
+	for i := range totalScrapes {
 		if i%2 == 0 {
 			mockResponses = append(mockResponses, mockPrometheusResponse{
 				code: 200,
@@ -75,7 +75,7 @@ func verifyStaleNaNs(t *testing.T, td *testData, resourceMetrics []pmetric.Resou
 	verifyNumTotalScrapeResults(t, td, resourceMetrics)
 	metrics1 := resourceMetrics[0].ScopeMetrics().At(0).Metrics()
 	ts := getTS(metrics1)
-	for i := 0; i < totalScrapes; i++ {
+	for i := range totalScrapes {
 		if i%2 == 0 {
 			verifyStaleNaNsSuccessfulScrape(t, td, resourceMetrics[i], ts, i+1)
 		} else {
diff --git a/receiver/prometheusreceiver/metrics_receiver_scrape_config_files_test.go b/receiver/prometheusreceiver/metrics_receiver_scrape_config_files_test.go
index 7654cd0dad0b3..34d1a690534be 100644
--- a/receiver/prometheusreceiver/metrics_receiver_scrape_config_files_test.go
+++ b/receiver/prometheusreceiver/metrics_receiver_scrape_config_files_test.go
@@ -58,7 +58,7 @@ func verifyScrapeConfigFiles(t *testing.T, _ *testData, result []pmetric.Resourc
 	assert.Equal(t, 6, result[0].ScopeMetrics().At(0).Metrics().Len())
 	metricFound := false
 
-	for i := 0; i < result[0].ScopeMetrics().At(0).Metrics().Len(); i++ {
+	for i := range result[0].ScopeMetrics().At(0).Metrics().Len() {
 		if result[0].ScopeMetrics().At(0).Metrics().At(i).Name() == "foo1" {
 			metricFound = true
 			break
diff --git a/receiver/prometheusreceiver/metrics_receiver_test.go b/receiver/prometheusreceiver/metrics_receiver_test.go
index f635fc4879843..2b0f339ddbf44 100644
--- a/receiver/prometheusreceiver/metrics_receiver_test.go
+++ b/receiver/prometheusreceiver/metrics_receiver_test.go
@@ -1377,31 +1377,31 @@ func verifyStartTimeMetricPage(t *testing.T, td *testData, result []pmetric.Reso
 	numTimeseries := 0
 	for _, rm := range result {
 		metrics := getMetrics(rm)
-		for i := 0; i < len(metrics); i++ {
+		for i := range metrics {
 			timestamp := startTimeMetricPageStartTimestamp
 			switch metrics[i].Type() {
 			case pmetric.MetricTypeGauge:
 				timestamp = nil
-				for j := 0; j < metrics[i].Gauge().DataPoints().Len(); j++ {
+				for j := range metrics[i].Gauge().DataPoints().Len() {
 					time := metrics[i].Gauge().DataPoints().At(j).StartTimestamp()
 					assert.Equal(t, timestamp.AsTime(), time.AsTime())
 					numTimeseries++
 				}
 			case pmetric.MetricTypeSum:
-				for j := 0; j < metrics[i].Sum().DataPoints().Len(); j++ {
+				for j := range metrics[i].Sum().DataPoints().Len() {
 					assert.Equal(t, timestamp.AsTime(), metrics[i].Sum().DataPoints().At(j).StartTimestamp().AsTime())
 					numTimeseries++
 				}
 			case pmetric.MetricTypeHistogram:
-				for j := 0; j < metrics[i].Histogram().DataPoints().Len(); j++ {
+				for j := range metrics[i].Histogram().DataPoints().Len() {
 					assert.Equal(t, timestamp.AsTime(), metrics[i].Histogram().DataPoints().At(j).StartTimestamp().AsTime())
 					numTimeseries++
 				}
 			case pmetric.MetricTypeSummary:
-				for j := 0; j < metrics[i].Summary().DataPoints().Len(); j++ {
+				for j := range metrics[i].Summary().DataPoints().Len() {
 					assert.Equal(t, timestamp.AsTime(), metrics[i].Summary().DataPoints().At(j).StartTimestamp().AsTime())
 					numTimeseries++
 				}
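As the prometheusreceiver test hunks above show, when the counted collection is a plain Go slice the rewrite ranges over the slice itself (for i := range tds) rather than over len(tds). A small illustrative sketch (the slice contents are made up, not from this patch):

package main

import "fmt"

func main() {
	tds := []string{"job-a", "job-b", "job-c"}

	// An index-only range over a slice yields i = 0..len(tds)-1,
	// equivalent to the deleted three-clause form.
	for i := range tds {
		fmt.Println(i, tds[i])
	}
}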
diff --git a/receiver/prometheusremotewritereceiver/receiver.go b/receiver/prometheusremotewritereceiver/receiver.go
index 741d43929b396..9a339eb1348fd 100644
--- a/receiver/prometheusremotewritereceiver/receiver.go
+++ b/receiver/prometheusremotewritereceiver/receiver.go
@@ -243,7 +243,7 @@ func addGaugeDatapoints(rm pmetric.ResourceMetrics, ls labels.Labels, ts writev2
 	// Check if the name and version present in the labels are already present in the ResourceMetrics.
 	// If it is not present, we should create a new ScopeMetrics.
 	// Otherwise, we should append to the existing ScopeMetrics.
-	for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+	for j := range rm.ScopeMetrics().Len() {
 		scope := rm.ScopeMetrics().At(j)
 		if scopeName == scope.Scope().Name() && scopeVersion == scope.Scope().Version() {
 			addDatapoints(scope.Metrics().AppendEmpty().SetEmptyGauge().DataPoints(), ls, ts)
diff --git a/receiver/receivercreator/consumer.go b/receiver/receivercreator/consumer.go
index f179f4a4630b0..b0ef6ee63cf2b 100644
--- a/receiver/receivercreator/consumer.go
+++ b/receiver/receivercreator/consumer.go
@@ -86,7 +86,7 @@ func (ec *enhancingConsumer) ConsumeLogs(ctx context.Context, ld plog.Logs) erro
 		return errors.New("no log consumer available")
 	}
 	rl := ld.ResourceLogs()
-	for i := 0; i < rl.Len(); i++ {
+	for i := range rl.Len() {
 		ec.putAttrs(rl.At(i).Resource().Attributes())
 	}
 
@@ -98,7 +98,7 @@ func (ec *enhancingConsumer) ConsumeMetrics(ctx context.Context, md pmetric.Metr
 		return errors.New("no metric consumer available")
 	}
 	rm := md.ResourceMetrics()
-	for i := 0; i < rm.Len(); i++ {
+	for i := range rm.Len() {
 		ec.putAttrs(rm.At(i).Resource().Attributes())
 	}
 
@@ -110,7 +110,7 @@ func (ec *enhancingConsumer) ConsumeTraces(ctx context.Context, td ptrace.Traces
 		return errors.New("no trace consumer available")
 	}
 	rs := td.ResourceSpans()
-	for i := 0; i < rs.Len(); i++ {
+	for i := range rs.Len() {
 		ec.putAttrs(rs.At(i).Resource().Attributes())
 	}
 
diff --git a/receiver/redisreceiver/redis_scraper.go b/receiver/redisreceiver/redis_scraper.go
index 51eff8ee17772..4d13c8ff94573 100644
--- a/receiver/redisreceiver/redis_scraper.go
+++ b/receiver/redisreceiver/redis_scraper.go
@@ -137,7 +137,7 @@ func (rs *redisScraper) recordCommonMetrics(ts pcommon.Timestamp, inf info) {
 // recordKeyspaceMetrics records metrics from 'keyspace' Redis info key-value pairs,
 // e.g. "db0: keys=1,expires=2,avg_ttl=3".
 func (rs *redisScraper) recordKeyspaceMetrics(ts pcommon.Timestamp, inf info) {
-	for db := 0; db < redisMaxDbs; db++ {
+	for db := range redisMaxDbs {
 		key := "db" + strconv.Itoa(db)
 		str, ok := inf[key]
 		if !ok {
diff --git a/receiver/signalfxreceiver/receiver.go b/receiver/signalfxreceiver/receiver.go
index 65c95fdedaa75..2525febb5ed4f 100644
--- a/receiver/signalfxreceiver/receiver.go
+++ b/receiver/signalfxreceiver/receiver.go
@@ -367,7 +367,7 @@ func (r *sfxReceiver) failRequest(
 func (r *sfxReceiver) addAccessTokenLabel(md pmetric.Metrics, req *http.Request) {
 	if r.config.AccessTokenPassthrough {
 		if accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != "" {
-			for i := 0; i < md.ResourceMetrics().Len(); i++ {
+			for i := range md.ResourceMetrics().Len() {
 				rm := md.ResourceMetrics().At(i)
 				res := rm.Resource()
 				res.Attributes().PutStr(splunk.SFxAccessTokenLabel, accessToken)
diff --git a/receiver/signalfxreceiver/receiver_test.go b/receiver/signalfxreceiver/receiver_test.go
index 1d44ef80e9a76..e7ad7e5629019 100644
--- a/receiver/signalfxreceiver/receiver_test.go
+++ b/receiver/signalfxreceiver/receiver_test.go
@@ -965,7 +965,7 @@ func sfxCategoryPtr(t sfxpb.EventCategory) *sfxpb.EventCategory {
 
 func buildNDimensions(n uint) []*sfxpb.Dimension {
 	d := make([]*sfxpb.Dimension, 0, n)
-	for i := uint(0); i < n; i++ {
+	for i := range n {
 		idx := int(i)
 		suffix := strconv.Itoa(idx)
 		d = append(d, &sfxpb.Dimension{
@@ -1027,7 +1027,7 @@ func buildHistogram(im pmetric.Metric) {
 }
 
 func addAttributes(count int, dst pcommon.Map) {
-	for i := 0; i < count; i++ {
+	for i := range count {
 		suffix := strconv.Itoa(i)
 		dst.PutStr("k"+suffix, "v"+suffix)
 	}
@@ -1040,7 +1040,7 @@ func buildOtlpMetrics(metricsCount int) *pmetric.Metrics {
 	ilm := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics()
 	ilm.EnsureCapacity(metricsCount)
 
-	for i := 0; i < metricsCount; i++ {
+	for i := range metricsCount {
 		switch i % 2 {
 		case 0:
 			buildGauge(ilm.AppendEmpty())
diff --git a/receiver/solacereceiver/receiver_test.go b/receiver/solacereceiver/receiver_test.go
index d67139f36ba23..b7bb7571f911b 100644
--- a/receiver/solacereceiver/receiver_test.go
+++ b/receiver/solacereceiver/receiver_test.go
@@ -835,7 +835,7 @@ func (m *mockUnmarshaller) unmarshal(message *inboundMessage) (ptrace.Traces, er
 func newTestTracesWithSpans(spanCount int) ptrace.Traces {
 	traces := ptrace.NewTraces()
 	spans := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty()
-	for i := 0; i < spanCount; i++ {
+	for range spanCount {
 		spans.Spans().AppendEmpty()
 	}
 	return traces
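In newTestTracesWithSpans above the index was never used, so the rewrite drops the loop variable entirely; the same pattern appears later in this patch for the b.N benchmark loops. A minimal sketch of that form:

package main

import "fmt"

func main() {
	// "for range n" runs the body exactly n times, with no index.
	count := 0
	for range 5 {
		count++
	}
	fmt.Println(count) // prints 5
}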
diff --git a/receiver/solacereceiver/unmarshaller_test.go b/receiver/solacereceiver/unmarshaller_test.go
index 9b3a17c4cd679..537d0a404b5a9 100644
--- a/receiver/solacereceiver/unmarshaller_test.go
+++ b/receiver/solacereceiver/unmarshaller_test.go
@@ -343,7 +343,7 @@ func TestSolaceMessageUnmarshallerUnmarshal(t *testing.T) {
 			instrumentation := resource.ScopeSpans().At(0)
 			assert.Equal(t, expectedInstrumentation.Scope(), instrumentation.Scope())
 			require.Equal(t, expectedInstrumentation.Spans().Len(), instrumentation.Spans().Len())
-			for i := 0; i < expectedInstrumentation.Spans().Len(); i++ {
+			for i := range expectedInstrumentation.Spans().Len() {
 				expectedSpan := expectedInstrumentation.Spans().At(i)
 				span := instrumentation.Spans().At(i)
 				compareSpans(t, expectedSpan, span)
@@ -366,7 +366,7 @@ func compareSpans(t *testing.T, expected, actual ptrace.Span) {
 	assert.Equal(t, expected.EndTimestamp(), actual.EndTimestamp())
 	assert.Equal(t, expected.Attributes().AsRaw(), actual.Attributes().AsRaw())
 	require.Equal(t, expected.Events().Len(), actual.Events().Len())
-	for i := 0; i < expected.Events().Len(); i++ {
+	for i := range expected.Events().Len() {
 		lessFunc := func(a, b ptrace.SpanEvent) bool {
 			return a.Name() < b.Name() // choose any comparison here
 		}
diff --git a/receiver/splunkhecreceiver/receiver_test.go b/receiver/splunkhecreceiver/receiver_test.go
index de46542216495..17f7a7396d957 100644
--- a/receiver/splunkhecreceiver/receiver_test.go
+++ b/receiver/splunkhecreceiver/receiver_test.go
@@ -841,7 +841,7 @@ func buildSplunkHecMetricsMsg(time float64, value int64, dimensions uint) *splun
 			"metric_name:foo": value,
 		},
 	}
-	for dim := uint(0); dim < dimensions; dim++ {
+	for dim := range dimensions {
 		ev.Fields[fmt.Sprintf("k%d", dim)] = fmt.Sprintf("v%d", dim)
 	}
 
@@ -856,7 +856,7 @@ func buildSplunkHecMsg(time float64, dimensions uint) *splunk.Event {
 		Index:      "myindex",
 		SourceType: "custom:sourcetype",
 	}
-	for dim := uint(0); dim < dimensions; dim++ {
+	for dim := range dimensions {
 		ev.Fields[fmt.Sprintf("k%d", dim)] = fmt.Sprintf("v%d", dim)
 	}
 
@@ -1923,14 +1923,14 @@ func BenchmarkHandleReq(b *testing.B) {
 	msgBytes, err := json.Marshal(splunkMsg)
 	require.NoError(b, err)
 	totalMessage := make([]byte, 100*len(msgBytes))
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		offset := len(msgBytes) * i
 		for bi, b := range msgBytes {
 			totalMessage[offset+bi] = b
 		}
 	}
 
-	for n := 0; n < b.N; n++ {
+	for range b.N {
 		req := httptest.NewRequest(http.MethodPost, "http://localhost/foo", bytes.NewReader(totalMessage))
 
 		rcv.handleReq(w, req)
diff --git a/receiver/splunkhecreceiver/splunk_to_logdata_test.go b/receiver/splunkhecreceiver/splunk_to_logdata_test.go
index 15e8bc95df806..c25fe6c6d683f 100644
--- a/receiver/splunkhecreceiver/splunk_to_logdata_test.go
+++ b/receiver/splunkhecreceiver/splunk_to_logdata_test.go
@@ -333,7 +333,7 @@ func Test_SplunkHecToLogData(t *testing.T) {
 			result, err := splunkHecToLogData(zap.NewNop(), tt.events, func(_ pcommon.Resource) {}, tt.hecConfig)
 			assert.Equal(t, tt.wantErr, err)
 			require.Equal(t, tt.output.Len(), result.ResourceLogs().Len())
-			for i := 0; i < result.ResourceLogs().Len(); i++ {
+			for i := range result.ResourceLogs().Len() {
 				assert.Equal(t, tt.output.At(i), result.ResourceLogs().At(i))
 			}
 		})
diff --git a/receiver/sshcheckreceiver/scraper_test.go b/receiver/sshcheckreceiver/scraper_test.go
index 074370b344453..154fc0bfe55ed 100644
--- a/receiver/sshcheckreceiver/scraper_test.go
+++ b/receiver/sshcheckreceiver/scraper_test.go
@@ -233,7 +233,7 @@ func TestScraperPropagatesResourceAttributes(t *testing.T) {
 	resourceMetrics := actualMetrics.ResourceMetrics()
 	expectedResourceAttributes := map[string]any{"ssh.endpoint": endpoint}
 
-	for i := 0; i < resourceMetrics.Len(); i++ {
+	for i := range resourceMetrics.Len() {
 		resourceAttributes := resourceMetrics.At(i).Resource().Attributes()
 		for name, value := range expectedResourceAttributes {
 			actualAttributeValue, ok := resourceAttributes.Get(name)
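The uint-based builders above (buildNDimensions and the Splunk HEC message builders) lose their explicit uint(0) initializer because the range-over-int loop variable adopts the type of the bound expression. A small sketch of that behavior (the variable names are illustrative):

package main

import "fmt"

func main() {
	var dimensions uint = 3

	// dim is typed uint here, matching the bound; no uint(0) start needed.
	for dim := range dimensions {
		fmt.Printf("%T k%d\n", dim, dim) // uint 0, 1, 2
	}
}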
diff --git a/receiver/statsdreceiver/internal/protocol/metric_translator.go b/receiver/statsdreceiver/internal/protocol/metric_translator.go
index 1cec9821cffd3..9aad82c630442 100644
--- a/receiver/statsdreceiver/internal/protocol/metric_translator.go
+++ b/receiver/statsdreceiver/internal/protocol/metric_translator.go
@@ -136,7 +136,7 @@ func buildHistogramMetric(desc statsDMetricDescription, histogram histogramMetri
 
 	out.BucketCounts().EnsureCapacity(int(in.Len()))
 
-	for i := uint32(0); i < in.Len(); i++ {
+	for i := range in.Len() {
 		out.BucketCounts().Append(in.At(i))
 	}
 }
diff --git a/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go b/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go
index bfdab71671289..3bac30871d29d 100644
--- a/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go
+++ b/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go
@@ -1622,9 +1622,9 @@ func TestStatsDParser_Mappings(t *testing.T) {
 
 			metrics := p.GetMetrics()[0].Metrics
 			ilm := metrics.ResourceMetrics().At(0).ScopeMetrics()
-			for i := 0; i < ilm.Len(); i++ {
+			for i := range ilm.Len() {
 				ilms := ilm.At(i).Metrics()
-				for j := 0; j < ilms.Len(); j++ {
+				for j := range ilms.Len() {
 					m := ilms.At(j)
 					typeNames[m.Type().String()] = m.Name()
 				}
@@ -1664,7 +1664,7 @@ func TestStatsDParser_ScopeIsIncluded(t *testing.T) {
 	require.Equal(t, 5, metrics.MetricCount())
 
 	el := metrics.ResourceMetrics().At(0)
-	for i := 0; i < metrics.MetricCount(); i++ {
+	for i := range metrics.MetricCount() {
 		scope := el.ScopeMetrics().At(i).Scope()
 
 		assert.Equal(t, receiverName, scope.Name())
diff --git a/receiver/syslogreceiver/syslog_test.go b/receiver/syslogreceiver/syslog_test.go
index a6213a233ef6f..3216e0eab0be1 100644
--- a/receiver/syslogreceiver/syslog_test.go
+++ b/receiver/syslogreceiver/syslog_test.go
@@ -54,7 +54,7 @@ func testSyslog(t *testing.T, cfg *SysLogConfig) {
 		require.NoError(t, err)
 	}
 
-	for i := 0; i < numLogs; i++ {
+	for i := range numLogs {
 		msg := fmt.Sprintf("<86>1 2021-02-28T00:0%d:02.003Z 192.168.1.1 SecureAuth0 23108 ID52020 [SecureAuth@27389] test msg %d\n", i, i)
 		_, err = conn.Write([]byte(msg))
 		require.NoError(t, err)
@@ -68,7 +68,7 @@ func testSyslog(t *testing.T, cfg *SysLogConfig) {
 	resourceLogs := sink.AllLogs()[0].ResourceLogs().At(0)
 	logs := resourceLogs.ScopeLogs().At(0).LogRecords()
 
-	for i := 0; i < numLogs; i++ {
+	for i := range numLogs {
 		log := logs.At(i)
 
 		require.Equal(t, log.Timestamp(), pcommon.Timestamp(1614470402003000000+i*60*1000*1000*1000))
diff --git a/receiver/tcplogreceiver/tcp_test.go b/receiver/tcplogreceiver/tcp_test.go
index cbe07cd254ce9..3c1971b446f21 100644
--- a/receiver/tcplogreceiver/tcp_test.go
+++ b/receiver/tcplogreceiver/tcp_test.go
@@ -41,7 +41,7 @@ func testTCP(t *testing.T, cfg *TCPLogConfig) {
 	conn, err = net.Dial("tcp", "127.0.0.1:29018")
 	require.NoError(t, err)
 
-	for i := 0; i < numLogs; i++ {
+	for i := range numLogs {
 		msg := fmt.Sprintf("<86>1 2021-02-28T00:0%d:02.003Z test msg %d\n", i, i)
 		_, err = conn.Write([]byte(msg))
 		require.NoError(t, err)
@@ -55,7 +55,7 @@ func testTCP(t *testing.T, cfg *TCPLogConfig) {
 	resourceLogs := sink.AllLogs()[0].ResourceLogs().At(0)
 	logs := resourceLogs.ScopeLogs().At(0).LogRecords()
 
-	for i := 0; i < numLogs; i++ {
+	for i := range numLogs {
 		log := logs.At(i)
 		msg := log.Body()
diff --git a/receiver/udplogreceiver/udp_test.go b/receiver/udplogreceiver/udp_test.go
index 4a5c7c3eddb00..e615809f71ea3 100644
--- a/receiver/udplogreceiver/udp_test.go
+++ b/receiver/udplogreceiver/udp_test.go
@@ -55,7 +55,7 @@ func testUDP(t *testing.T, cfg *UDPLogConfig, listenAddress string) {
 	conn, err = net.Dial("udp", listenAddress)
 	require.NoError(t, err)
 
-	for i := 0; i < numLogs; i++ {
+	for i := range numLogs {
 		msg := fmt.Sprintf("<86>1 2021-02-28T00:0%d:02.003Z test msg %d\n", i, i)
 		_, err = conn.Write([]byte(msg))
 		require.NoError(t, err)
@@ -72,11 +72,11 @@ func testUDP(t *testing.T, cfg *UDPLogConfig, listenAddress string) {
 
 	expectedLogs := make([]string, numLogs)
 
-	for i := 0; i < numLogs; i++ {
+	for i := range numLogs {
 		expectedLogs[i] = fmt.Sprintf("<86>1 2021-02-28T00:0%d:02.003Z test msg %d", i, i)
 	}
 
-	for i := 0; i < numLogs; i++ {
+	for i := range numLogs {
 		assert.Contains(t, expectedLogs, logs.At(i).Body().Str())
 	}
 }
diff --git a/receiver/wavefrontreceiver/receiver_test.go b/receiver/wavefrontreceiver/receiver_test.go
index cd0859f755d09..53a94a35525c2 100644
--- a/receiver/wavefrontreceiver/receiver_test.go
+++ b/receiver/wavefrontreceiver/receiver_test.go
@@ -134,11 +134,11 @@ func Test_wavefrontreceiver_EndToEnd(t *testing.T) {
 			metrics := sink.AllMetrics()
 			var gotMetrics []pmetric.Metric
 			for _, md := range metrics {
-				for i := 0; i < md.ResourceMetrics().Len(); i++ {
+				for i := range md.ResourceMetrics().Len() {
 					rm := md.ResourceMetrics().At(i)
-					for j := 0; j < rm.ScopeMetrics().Len(); j++ {
+					for j := range rm.ScopeMetrics().Len() {
 						sm := rm.ScopeMetrics().At(j)
-						for k := 0; k < sm.Metrics().Len(); k++ {
+						for k := range sm.Metrics().Len() {
 							gotMetrics = append(gotMetrics, sm.Metrics().At(k))
 						}
 					}
diff --git a/receiver/windowseventlogreceiver/receiver_windows_test.go b/receiver/windowseventlogreceiver/receiver_windows_test.go
index 3be8a57cade0a..644cd3240930e 100644
--- a/receiver/windowseventlogreceiver/receiver_windows_test.go
+++ b/receiver/windowseventlogreceiver/receiver_windows_test.go
@@ -96,7 +96,7 @@ func BenchmarkReadWindowsEventLogger(b *testing.B) {
 	}
 	for _, tt := range tests {
 		b.Run(tt.name, func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
+			for range b.N {
 				// Set up the receiver and sink.
 				ctx := context.Background()
 				factory := newFactoryAdapter()
@@ -342,12 +342,12 @@ func assertExpectedLogRecords(t *testing.T, sink *consumertest.LogsSink, expecte
 func filterAllLogRecordsBySource(t *testing.T, sink *consumertest.LogsSink, src string) (filteredLogRecords []plog.LogRecord) {
 	for _, logs := range sink.AllLogs() {
 		resourceLogsSlice := logs.ResourceLogs()
-		for i := 0; i < resourceLogsSlice.Len(); i++ {
+		for i := range resourceLogsSlice.Len() {
 			resourceLogs := resourceLogsSlice.At(i)
 			scopeLogsSlice := resourceLogs.ScopeLogs()
-			for j := 0; j < scopeLogsSlice.Len(); j++ {
+			for j := range scopeLogsSlice.Len() {
 				logRecords := scopeLogsSlice.At(j).LogRecords()
-				for k := 0; k < logRecords.Len(); k++ {
+				for k := range logRecords.Len() {
 					logRecord := logRecords.At(k)
 					if extractEventSourceFromLogRecord(t, logRecord) == src {
 						filteredLogRecords = append(filteredLogRecords, logRecord)
diff --git a/receiver/zipkinreceiver/proto_parse_test.go b/receiver/zipkinreceiver/proto_parse_test.go
index 2fefe29428dbe..dcbdced22cbdf 100644
--- a/receiver/zipkinreceiver/proto_parse_test.go
+++ b/receiver/zipkinreceiver/proto_parse_test.go
@@ -139,11 +139,11 @@ func TestConvertSpansToTraceSpans_protobuf(t *testing.T) {
 	assert.Equal(t, want.SpanCount(), reqs.SpanCount())
 	assert.Equal(t, want.ResourceSpans().Len(), reqs.ResourceSpans().Len())
 
-	for i := 0; i < want.ResourceSpans().Len(); i++ {
+	for i := range want.ResourceSpans().Len() {
 		wantRS := want.ResourceSpans().At(i)
 		wSvcName, ok := wantRS.Resource().Attributes().Get(conventions.AttributeServiceName)
 		assert.True(t, ok)
-		for j := 0; j < reqs.ResourceSpans().Len(); j++ {
+		for j := range reqs.ResourceSpans().Len() {
 			reqsRS := reqs.ResourceSpans().At(j)
 			rSvcName, ok := reqsRS.Resource().Attributes().Get(conventions.AttributeServiceName)
 			assert.True(t, ok)
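One semantic nuance worth keeping in mind when reading these rewrites (an observation about the language, not something this patch states): a three-clause loop re-evaluates its condition on every iteration, while range-over-int evaluates the bound once, before the loop begins. The two forms are equivalent for read-only traversals like the ones touched here; they differ only if the body changes the collection's length mid-loop. A sketch:

package main

import "fmt"

func main() {
	nums := []int{1, 2, 3}

	// The bound len(nums) is captured once, so appending inside the body
	// does not extend this loop; a three-clause loop re-checking
	// i < len(nums) would have picked up the new length.
	for i := range len(nums) {
		if i == 0 {
			nums = append(nums, 4)
		}
		fmt.Println(nums[i]) // prints 1, 2, 3
	}
}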
diff --git a/testbed/correctnesstests/metrics/metrics_test_harness.go b/testbed/correctnesstests/metrics/metrics_test_harness.go
index e27e2645249f8..95448a5e16e49 100644
--- a/testbed/correctnesstests/metrics/metrics_test_harness.go
+++ b/testbed/correctnesstests/metrics/metrics_test_harness.go
@@ -68,7 +68,7 @@ func (h *testHarness) ConsumeMetrics(_ context.Context, pdm pmetric.Metrics) err
 func (h *testHarness) compare(pdm pmetric.Metrics) {
 	pdms := pdm.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics()
 	var diffs []*metricstestutil.MetricDiff
-	for i := 0; i < pdms.Len(); i++ {
+	for i := range pdms.Len() {
 		pdmRecd := pdms.At(i)
 		metricName := pdmRecd.Name()
 		metric, found := h.metricIndex.lookup(metricName)
diff --git a/testbed/datasenders/fluent.go b/testbed/datasenders/fluent.go
index 4325bf451470e..346a87e6a8bb0 100644
--- a/testbed/datasenders/fluent.go
+++ b/testbed/datasenders/fluent.go
@@ -77,10 +77,10 @@ func (f *FluentLogsForwarder) Stop() error {
 }
 
 func (f *FluentLogsForwarder) ConsumeLogs(_ context.Context, logs plog.Logs) error {
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
-		for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ {
+	for i := range logs.ResourceLogs().Len() {
+		for j := range logs.ResourceLogs().At(i).ScopeLogs().Len() {
 			ills := logs.ResourceLogs().At(i).ScopeLogs().At(j)
-			for k := 0; k < ills.LogRecords().Len(); k++ {
+			for k := range ills.LogRecords().Len() {
 				if f.dataFile == nil {
 					if err := f.fluentLogger.Post("", f.convertLogToMap(ills.LogRecords().At(k))); err != nil {
 						return err
diff --git a/testbed/datasenders/k8s.go b/testbed/datasenders/k8s.go
index a894b2c366f3a..33fea8c73d6cf 100644
--- a/testbed/datasenders/k8s.go
+++ b/testbed/datasenders/k8s.go
@@ -79,10 +79,10 @@ func (f *FileLogK8sWriter) Start() error {
 }
 
 func (f *FileLogK8sWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error {
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
-		for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ {
+	for i := range logs.ResourceLogs().Len() {
+		for j := range logs.ResourceLogs().At(i).ScopeLogs().Len() {
 			ills := logs.ResourceLogs().At(i).ScopeLogs().At(j)
-			for k := 0; k < ills.LogRecords().Len(); k++ {
+			for k := range ills.LogRecords().Len() {
 				_, err := f.file.Write(append(f.convertLogToTextLine(ills.LogRecords().At(k)), '\n'))
 				if err != nil {
 					return err
diff --git a/testbed/datasenders/stanza.go b/testbed/datasenders/stanza.go
index d056075e8c094..e81ab958410e1 100644
--- a/testbed/datasenders/stanza.go
+++ b/testbed/datasenders/stanza.go
@@ -62,10 +62,10 @@ func (f *FileLogWriter) Start() error {
 }
 
 func (f *FileLogWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error {
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
-		for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ {
+	for i := range logs.ResourceLogs().Len() {
+		for j := range logs.ResourceLogs().At(i).ScopeLogs().Len() {
 			ills := logs.ResourceLogs().At(i).ScopeLogs().At(j)
-			for k := 0; k < ills.LogRecords().Len(); k++ {
+			for k := range ills.LogRecords().Len() {
 				_, err := f.file.Write(append(f.convertLogToTextLine(ills.LogRecords().At(k)), '\n'))
 				if err != nil {
 					return err
diff --git a/testbed/datasenders/syslog.go b/testbed/datasenders/syslog.go
index 2b98b06261b8b..2d5fd0e069d0a 100644
--- a/testbed/datasenders/syslog.go
+++ b/testbed/datasenders/syslog.go
@@ -66,10 +66,10 @@ func (f *SyslogWriter) Start() (err error) {
 }
 
 func (f *SyslogWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error {
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
-		for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ {
+	for i := range logs.ResourceLogs().Len() {
+		for j := range logs.ResourceLogs().At(i).ScopeLogs().Len() {
 			ills := logs.ResourceLogs().At(i).ScopeLogs().At(j)
-			for k := 0; k < ills.LogRecords().Len(); k++ {
+			for k := range ills.LogRecords().Len() {
 				err := f.Send(ills.LogRecords().At(k))
 				if err != nil {
 					return err
diff --git a/testbed/datasenders/tcpudp.go b/testbed/datasenders/tcpudp.go
index 8a77c02f80732..a4aa3b2fd6fc9 100644
--- a/testbed/datasenders/tcpudp.go
+++ b/testbed/datasenders/tcpudp.go
@@ -67,10 +67,10 @@ func (f *TCPUDPWriter) Start() (err error) {
 }
 
 func (f *TCPUDPWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error {
-	for i := 0; i < logs.ResourceLogs().Len(); i++ {
-		for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ {
+	for i := range logs.ResourceLogs().Len() {
+		for j := range logs.ResourceLogs().At(i).ScopeLogs().Len() {
 			ills := logs.ResourceLogs().At(i).ScopeLogs().At(j)
-			for k := 0; k < ills.LogRecords().Len(); k++ {
+			for k := range ills.LogRecords().Len() {
 				err := f.Send(ills.LogRecords().At(k))
 				if err != nil {
 					return err
diff --git a/testbed/mockdatasenders/mockdatadogagentexporter/traces_exporter.go b/testbed/mockdatasenders/mockdatadogagentexporter/traces_exporter.go
index 93bddfd833ba8..2a0e1e62ce266 100644
--- a/testbed/mockdatasenders/mockdatadogagentexporter/traces_exporter.go
+++ b/testbed/mockdatasenders/mockdatadogagentexporter/traces_exporter.go
@@ -45,12 +45,12 @@ func (dd *ddExporter) start(ctx context.Context, host component.Host) (err error
 func (dd *ddExporter) pushTraces(ctx context.Context, td ptrace.Traces) error {
 	var traces pb.Traces
 
-	for i := 0; i < td.ResourceSpans().Len(); i++ {
+	for i := range td.ResourceSpans().Len() {
 		resSpans := td.ResourceSpans().At(i)
 		var trace pb.Trace
-		for l := 0; l < resSpans.ScopeSpans().Len(); l++ {
-			ils := resSpans.ScopeSpans().At(i)
-			for s := 0; s < ils.Spans().Len(); s++ {
+		for l := range resSpans.ScopeSpans().Len() {
+			ils := resSpans.ScopeSpans().At(l)
+			for s := range ils.Spans().Len() {
 				span := ils.Spans().At(s)
 				newSpan := pb.Span{
 					Service: "test",