From df3c9e38a80ccc3b14705462be2e2e51c628a3b3 Mon Sep 17 00:00:00 2001 From: Damien Mathieu <42@dmathieu.com> Date: Thu, 19 Sep 2024 13:17:38 +0200 Subject: [PATCH] [chore] split exporterhelper so a separate profiles module can use it (#11215) #### Description This is part of #11131, splitting the exporterhelper module with a new `internal` submodule that holds everything shared by the signals. This is so a new `exporterhelperprofiles` module can make use of the shared structs. --- exporter/exporterhelper/common.go | 328 +---------------- exporter/exporterhelper/exporterhelper.go | 6 - .../exporterhelper/internal/base_exporter.go | 341 ++++++++++++++++++ .../base_exporter_test.go} | 40 +- .../{ => internal}/batch_sender.go | 43 +-- .../{ => internal}/batch_sender_test.go | 137 +++---- .../{ => internal}/obsexporter.go | 97 +++-- .../{ => internal}/obsexporter_test.go | 87 +++-- .../exporterhelper/internal/queue_sender.go | 154 ++++++++ .../{ => internal}/queue_sender_test.go | 123 +++---- .../{request_test.go => internal/request.go} | 35 +- .../exporterhelper/internal/request_sender.go | 33 ++ .../exporterhelper/internal/retry_sender.go | 142 ++++++++ .../{ => internal}/retry_sender_test.go | 91 ++--- .../exporterhelper/internal/timeout_sender.go | 52 +++ .../{ => internal}/timeout_sender_test.go | 2 +- exporter/exporterhelper/logs.go | 31 +- exporter/exporterhelper/logs_test.go | 26 +- exporter/exporterhelper/metrics.go | 31 +- exporter/exporterhelper/metrics_test.go | 26 +- exporter/exporterhelper/obsreport_test.go | 15 +- exporter/exporterhelper/queue_sender.go | 133 +------ exporter/exporterhelper/retry_sender.go | 129 +------ exporter/exporterhelper/timeout_sender.go | 43 +-- exporter/exporterhelper/traces.go | 31 +- exporter/exporterhelper/traces_test.go | 26 +- 26 files changed, 1161 insertions(+), 1041 deletions(-) create mode 100644 exporter/exporterhelper/internal/base_exporter.go rename exporter/exporterhelper/{common_test.go => 
internal/base_exporter_test.go} (69%) rename exporter/exporterhelper/{ => internal}/batch_sender.go (81%) rename exporter/exporterhelper/{ => internal}/batch_sender_test.go (83%) rename exporter/exporterhelper/{ => internal}/obsexporter.go (52%) rename exporter/exporterhelper/{ => internal}/obsexporter_test.go (73%) create mode 100644 exporter/exporterhelper/internal/queue_sender.go rename exporter/exporterhelper/{ => internal}/queue_sender_test.go (77%) rename exporter/exporterhelper/{request_test.go => internal/request.go} (71%) create mode 100644 exporter/exporterhelper/internal/request_sender.go create mode 100644 exporter/exporterhelper/internal/retry_sender.go rename exporter/exporterhelper/{ => internal}/retry_sender_test.go (78%) create mode 100644 exporter/exporterhelper/internal/timeout_sender.go rename exporter/exporterhelper/{ => internal}/timeout_sender_test.go (95%) diff --git a/exporter/exporterhelper/common.go b/exporter/exporterhelper/common.go index 5b8a2a51767..f3e9f06b2ec 100644 --- a/exporter/exporterhelper/common.go +++ b/exporter/exporterhelper/common.go @@ -4,123 +4,46 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "fmt" - - "go.uber.org/multierr" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" ) -// requestSender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs). 
-type requestSender interface { - component.Component - send(context.Context, Request) error - setNextSender(nextSender requestSender) -} - -type baseRequestSender struct { - component.StartFunc - component.ShutdownFunc - nextSender requestSender -} - -var _ requestSender = (*baseRequestSender)(nil) - -func (b *baseRequestSender) send(ctx context.Context, req Request) error { - return b.nextSender.send(ctx, req) -} - -func (b *baseRequestSender) setNextSender(nextSender requestSender) { - b.nextSender = nextSender -} - -type obsrepSenderFactory func(obsrep *obsReport) requestSender - -// Option apply changes to baseExporter. -type Option interface { - apply(*baseExporter) error -} - -type optionFunc func(*baseExporter) error - -func (of optionFunc) apply(e *baseExporter) error { - return of(e) -} +// Option apply changes to BaseExporter. +type Option = internal.Option // WithStart overrides the default Start function for an exporter. // The default start function does nothing and always returns nil. func WithStart(start component.StartFunc) Option { - return optionFunc(func(o *baseExporter) error { - o.StartFunc = start - return nil - }) + return internal.WithStart(start) } // WithShutdown overrides the default Shutdown function for an exporter. // The default shutdown function does nothing and always returns nil. func WithShutdown(shutdown component.ShutdownFunc) Option { - return optionFunc(func(o *baseExporter) error { - o.ShutdownFunc = shutdown - return nil - }) + return internal.WithShutdown(shutdown) } // WithTimeout overrides the default TimeoutSettings for an exporter. // The default TimeoutSettings is 5 seconds. func WithTimeout(timeoutConfig TimeoutConfig) Option { - return optionFunc(func(o *baseExporter) error { - o.timeoutSender.cfg = timeoutConfig - return nil - }) + return internal.WithTimeout(timeoutConfig) } // WithRetry overrides the default configretry.BackOffConfig for an exporter. // The default configretry.BackOffConfig is to disable retries. 
func WithRetry(config configretry.BackOffConfig) Option { - return optionFunc(func(o *baseExporter) error { - if !config.Enabled { - o.exportFailureMessage += " Try enabling retry_on_failure config option to retry on retryable errors." - return nil - } - o.retrySender = newRetrySender(config, o.set) - return nil - }) + return internal.WithRetry(config) } // WithQueue overrides the default QueueConfig for an exporter. // The default QueueConfig is to disable queueing. // This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. -func WithQueue(config QueueConfig) Option { - return optionFunc(func(o *baseExporter) error { - if o.marshaler == nil || o.unmarshaler == nil { - return fmt.Errorf("WithQueue option is not available for the new request exporters, use WithRequestQueue instead") - } - if !config.Enabled { - o.exportFailureMessage += " Try enabling sending_queue to survive temporary failures." - return nil - } - qf := exporterqueue.NewPersistentQueueFactory[Request](config.StorageID, exporterqueue.PersistentQueueSettings[Request]{ - Marshaler: o.marshaler, - Unmarshaler: o.unmarshaler, - }) - q := qf(context.Background(), exporterqueue.Settings{ - DataType: o.signal, - ExporterSettings: o.set, - }, exporterqueue.Config{ - Enabled: config.Enabled, - NumConsumers: config.NumConsumers, - QueueSize: config.QueueSize, - }) - o.queueSender = newQueueSender(q, o.set, config.NumConsumers, o.exportFailureMessage, o.obsrep) - return nil - }) +func WithQueue(config internal.QueueConfig) Option { + return internal.WithQueue(config) } // WithRequestQueue enables queueing for an exporter. @@ -128,54 +51,22 @@ func WithQueue(config QueueConfig) Option { // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Factory[Request]) Option { - return optionFunc(func(o *baseExporter) error { - if o.marshaler != nil || o.unmarshaler != nil { - return fmt.Errorf("WithRequestQueue option must be used with the new request exporters only, use WithQueue instead") - } - if !cfg.Enabled { - o.exportFailureMessage += " Try enabling sending_queue to survive temporary failures." - return nil - } - o.queueCfg = cfg - o.queueFactory = queueFactory - return nil - }) + return internal.WithRequestQueue(cfg, queueFactory) } // WithCapabilities overrides the default Capabilities() function for a Consumer. // The default is non-mutable data. // TODO: Verify if we can change the default to be mutable as we do for processors. func WithCapabilities(capabilities consumer.Capabilities) Option { - return optionFunc(func(o *baseExporter) error { - o.consumerOptions = append(o.consumerOptions, consumer.WithCapabilities(capabilities)) - return nil - }) + return internal.WithCapabilities(capabilities) } // BatcherOption apply changes to batcher sender. -type BatcherOption interface { - apply(*batchSender) error -} - -type batcherOptionFunc func(*batchSender) error - -func (of batcherOptionFunc) apply(e *batchSender) error { - return of(e) -} +type BatcherOption = internal.BatcherOption // WithRequestBatchFuncs sets the functions for merging and splitting batches for an exporter built for custom request types. 
func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) BatcherOption { - return batcherOptionFunc(func(bs *batchSender) error { - if mf == nil || msf == nil { - return fmt.Errorf("WithRequestBatchFuncs must be provided with non-nil functions") - } - if bs.mergeFunc != nil || bs.mergeSplitFunc != nil { - return fmt.Errorf("WithRequestBatchFuncs can only be used once with request-based exporters") - } - bs.mergeFunc = mf - bs.mergeSplitFunc = msf - return nil - }) + return internal.WithRequestBatchFuncs(mf, msf) } // WithBatcher enables batching for an exporter based on custom request types. @@ -184,196 +75,5 @@ func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf expor // This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. func WithBatcher(cfg exporterbatcher.Config, opts ...BatcherOption) Option { - return optionFunc(func(o *baseExporter) error { - if !cfg.Enabled { - return nil - } - - bs := newBatchSender(cfg, o.set, o.batchMergeFunc, o.batchMergeSplitfunc) - for _, opt := range opts { - if err := opt.apply(bs); err != nil { - return err - } - } - if bs.mergeFunc == nil || bs.mergeSplitFunc == nil { - return fmt.Errorf("WithRequestBatchFuncs must be provided for the batcher applied to the request-based exporters") - } - o.batchSender = bs - return nil - }) -} - -// withMarshaler is used to set the request marshaler for the new exporter helper. -// It must be provided as the first option when creating a new exporter helper. -func withMarshaler(marshaler exporterqueue.Marshaler[Request]) Option { - return optionFunc(func(o *baseExporter) error { - o.marshaler = marshaler - return nil - }) -} - -// withUnmarshaler is used to set the request unmarshaler for the new exporter helper. 
-// It must be provided as the first option when creating a new exporter helper. -func withUnmarshaler(unmarshaler exporterqueue.Unmarshaler[Request]) Option { - return optionFunc(func(o *baseExporter) error { - o.unmarshaler = unmarshaler - return nil - }) -} - -// withBatchFuncs is used to set the functions for merging and splitting batches for OLTP-based exporters. -// It must be provided as the first option when creating a new exporter helper. -func withBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) Option { - return optionFunc(func(o *baseExporter) error { - o.batchMergeFunc = mf - o.batchMergeSplitfunc = msf - return nil - }) -} - -// baseExporter contains common fields between different exporter types. -type baseExporter struct { - component.StartFunc - component.ShutdownFunc - - signal component.DataType - - batchMergeFunc exporterbatcher.BatchMergeFunc[Request] - batchMergeSplitfunc exporterbatcher.BatchMergeSplitFunc[Request] - - marshaler exporterqueue.Marshaler[Request] - unmarshaler exporterqueue.Unmarshaler[Request] - - set exporter.Settings - obsrep *obsReport - - // Message for the user to be added with an export failure message. - exportFailureMessage string - - // Chain of senders that the exporter helper applies before passing the data to the actual exporter. - // The data is handled by each sender in the respective order starting from the queueSender. - // Most of the senders are optional, and initialized with a no-op path-through sender. - batchSender requestSender - queueSender requestSender - obsrepSender requestSender - retrySender requestSender - timeoutSender *timeoutSender // timeoutSender is always initialized. 
- - consumerOptions []consumer.Option - - queueCfg exporterqueue.Config - queueFactory exporterqueue.Factory[Request] - batcherCfg exporterbatcher.Config - batcherOpts []BatcherOption -} - -func newBaseExporter(set exporter.Settings, signal component.DataType, osf obsrepSenderFactory, options ...Option) (*baseExporter, error) { - obsReport, err := newExporter(obsReportSettings{exporterID: set.ID, exporterCreateSettings: set, dataType: signal}) - if err != nil { - return nil, err - } - - be := &baseExporter{ - signal: signal, - - batchSender: &baseRequestSender{}, - queueSender: &baseRequestSender{}, - obsrepSender: osf(obsReport), - retrySender: &baseRequestSender{}, - timeoutSender: &timeoutSender{cfg: NewDefaultTimeoutConfig()}, - - set: set, - obsrep: obsReport, - } - - for _, op := range options { - err = multierr.Append(err, op.apply(be)) - } - if err != nil { - return nil, err - } - - if be.batcherCfg.Enabled { - bs := newBatchSender(be.batcherCfg, be.set, be.batchMergeFunc, be.batchMergeSplitfunc) - for _, opt := range be.batcherOpts { - err = multierr.Append(err, opt.apply(bs)) - } - if bs.mergeFunc == nil || bs.mergeSplitFunc == nil { - err = multierr.Append(err, fmt.Errorf("WithRequestBatchFuncs must be provided for the batcher applied to the request-based exporters")) - } - be.batchSender = bs - } - - if be.queueCfg.Enabled { - set := exporterqueue.Settings{ - DataType: be.signal, - ExporterSettings: be.set, - } - be.queueSender = newQueueSender(be.queueFactory(context.Background(), set, be.queueCfg), be.set, be.queueCfg.NumConsumers, be.exportFailureMessage, be.obsrep) - for _, op := range options { - err = multierr.Append(err, op.apply(be)) - } - } - - if err != nil { - return nil, err - } - - be.connectSenders() - - if bs, ok := be.batchSender.(*batchSender); ok { - // If queue sender is enabled assign to the batch sender the same number of workers. 
- if qs, ok := be.queueSender.(*queueSender); ok { - bs.concurrencyLimit = int64(qs.numConsumers) - } - // Batcher sender mutates the data. - be.consumerOptions = append(be.consumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) - } - - return be, nil -} - -// send sends the request using the first sender in the chain. -func (be *baseExporter) send(ctx context.Context, req Request) error { - err := be.queueSender.send(ctx, req) - if err != nil { - be.set.Logger.Error("Exporting failed. Rejecting data."+be.exportFailureMessage, - zap.Error(err), zap.Int("rejected_items", req.ItemsCount())) - } - return err -} - -// connectSenders connects the senders in the predefined order. -func (be *baseExporter) connectSenders() { - be.queueSender.setNextSender(be.batchSender) - be.batchSender.setNextSender(be.obsrepSender) - be.obsrepSender.setNextSender(be.retrySender) - be.retrySender.setNextSender(be.timeoutSender) -} - -func (be *baseExporter) Start(ctx context.Context, host component.Host) error { - // First start the wrapped exporter. - if err := be.StartFunc.Start(ctx, host); err != nil { - return err - } - - // If no error then start the batchSender. - if err := be.batchSender.Start(ctx, host); err != nil { - return err - } - - // Last start the queueSender. - return be.queueSender.Start(ctx, host) -} - -func (be *baseExporter) Shutdown(ctx context.Context) error { - return multierr.Combine( - // First shutdown the retry sender, so the queue sender can flush the queue without retries. - be.retrySender.Shutdown(ctx), - // Then shutdown the batch sender - be.batchSender.Shutdown(ctx), - // Then shutdown the queue sender. - be.queueSender.Shutdown(ctx), - // Last shutdown the wrapped exporter itself. - be.ShutdownFunc.Shutdown(ctx)) + return internal.WithBatcher(cfg, opts...) 
} diff --git a/exporter/exporterhelper/exporterhelper.go b/exporter/exporterhelper/exporterhelper.go index 0890ec71af1..d9e90d821d9 100644 --- a/exporter/exporterhelper/exporterhelper.go +++ b/exporter/exporterhelper/exporterhelper.go @@ -16,9 +16,3 @@ type Request = internal.Request // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. type RequestErrorHandler = internal.RequestErrorHandler - -// extractPartialRequest returns a new Request that may contain the items left to be sent -// if only some items failed to process and can be retried. Otherwise, it returns the original Request. -func extractPartialRequest(req Request, err error) Request { - return internal.ExtractPartialRequest(req, err) -} diff --git a/exporter/exporterhelper/internal/base_exporter.go b/exporter/exporterhelper/internal/base_exporter.go new file mode 100644 index 00000000000..922dbc9b34a --- /dev/null +++ b/exporter/exporterhelper/internal/base_exporter.go @@ -0,0 +1,341 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/codes" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.uber.org/multierr" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterqueue" // BaseExporter contains common fields between different exporter types. 
+ "go.opentelemetry.io/collector/exporter/internal" +) + +type ObsrepSenderFactory = func(obsrep *ObsReport) RequestSender + +// Option apply changes to BaseExporter. +type Option func(*BaseExporter) error + +// BatcherOption apply changes to batcher sender. +type BatcherOption func(*BatchSender) error + +type BaseExporter struct { + component.StartFunc + component.ShutdownFunc + + Signal component.DataType + + BatchMergeFunc exporterbatcher.BatchMergeFunc[internal.Request] + BatchMergeSplitfunc exporterbatcher.BatchMergeSplitFunc[internal.Request] + + Marshaler exporterqueue.Marshaler[internal.Request] + Unmarshaler exporterqueue.Unmarshaler[internal.Request] + + Set exporter.Settings + Obsrep *ObsReport + + // Message for the user to be added with an export failure message. + ExportFailureMessage string + + // Chain of senders that the exporter helper applies before passing the data to the actual exporter. + // The data is handled by each sender in the respective order starting from the queueSender. + // Most of the senders are optional, and initialized with a no-op path-through sender. + BatchSender RequestSender + QueueSender RequestSender + ObsrepSender RequestSender + RetrySender RequestSender + TimeoutSender *TimeoutSender // TimeoutSender is always initialized. 
+ + ConsumerOptions []consumer.Option + + QueueCfg exporterqueue.Config + QueueFactory exporterqueue.Factory[internal.Request] + BatcherCfg exporterbatcher.Config + BatcherOpts []BatcherOption +} + +func NewBaseExporter(set exporter.Settings, signal component.DataType, osf ObsrepSenderFactory, options ...Option) (*BaseExporter, error) { + obsReport, err := NewExporter(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set, DataType: signal}) + if err != nil { + return nil, err + } + + be := &BaseExporter{ + Signal: signal, + + BatchSender: &BaseRequestSender{}, + QueueSender: &BaseRequestSender{}, + ObsrepSender: osf(obsReport), + RetrySender: &BaseRequestSender{}, + TimeoutSender: &TimeoutSender{cfg: NewDefaultTimeoutConfig()}, + + Set: set, + Obsrep: obsReport, + } + + for _, op := range options { + err = multierr.Append(err, op(be)) + } + if err != nil { + return nil, err + } + + if be.BatcherCfg.Enabled { + bs := NewBatchSender(be.BatcherCfg, be.Set, be.BatchMergeFunc, be.BatchMergeSplitfunc) + for _, opt := range be.BatcherOpts { + err = multierr.Append(err, opt(bs)) + } + if bs.mergeFunc == nil || bs.mergeSplitFunc == nil { + err = multierr.Append(err, fmt.Errorf("WithRequestBatchFuncs must be provided for the batcher applied to the request-based exporters")) + } + be.BatchSender = bs + } + + if be.QueueCfg.Enabled { + set := exporterqueue.Settings{ + DataType: be.Signal, + ExporterSettings: be.Set, + } + be.QueueSender = NewQueueSender(be.QueueFactory(context.Background(), set, be.QueueCfg), be.Set, be.QueueCfg.NumConsumers, be.ExportFailureMessage, be.Obsrep) + for _, op := range options { + err = multierr.Append(err, op(be)) + } + } + + if err != nil { + return nil, err + } + + be.connectSenders() + + if bs, ok := be.BatchSender.(*BatchSender); ok { + // If queue sender is enabled assign to the batch sender the same number of workers. 
+ if qs, ok := be.QueueSender.(*QueueSender); ok { + bs.concurrencyLimit = int64(qs.numConsumers) + } + // Batcher sender mutates the data. + be.ConsumerOptions = append(be.ConsumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) + } + + return be, nil +} + +// send sends the request using the first sender in the chain. +func (be *BaseExporter) Send(ctx context.Context, req internal.Request) error { + err := be.QueueSender.Send(ctx, req) + if err != nil { + be.Set.Logger.Error("Exporting failed. Rejecting data."+be.ExportFailureMessage, + zap.Error(err), zap.Int("rejected_items", req.ItemsCount())) + } + return err +} + +// connectSenders connects the senders in the predefined order. +func (be *BaseExporter) connectSenders() { + be.QueueSender.SetNextSender(be.BatchSender) + be.BatchSender.SetNextSender(be.ObsrepSender) + be.ObsrepSender.SetNextSender(be.RetrySender) + be.RetrySender.SetNextSender(be.TimeoutSender) +} + +func (be *BaseExporter) Start(ctx context.Context, host component.Host) error { + // First start the wrapped exporter. + if err := be.StartFunc.Start(ctx, host); err != nil { + return err + } + + // If no error then start the BatchSender. + if err := be.BatchSender.Start(ctx, host); err != nil { + return err + } + + // Last start the queueSender. + return be.QueueSender.Start(ctx, host) +} + +func (be *BaseExporter) Shutdown(ctx context.Context) error { + return multierr.Combine( + // First shutdown the retry sender, so the queue sender can flush the queue without retries. + be.RetrySender.Shutdown(ctx), + // Then shutdown the batch sender + be.BatchSender.Shutdown(ctx), + // Then shutdown the queue sender. + be.QueueSender.Shutdown(ctx), + // Last shutdown the wrapped exporter itself. + be.ShutdownFunc.Shutdown(ctx)) +} + +// WithStart overrides the default Start function for an exporter. +// The default start function does nothing and always returns nil. 
+func WithStart(start component.StartFunc) Option { + return func(o *BaseExporter) error { + o.StartFunc = start + return nil + } +} + +// WithShutdown overrides the default Shutdown function for an exporter. +// The default shutdown function does nothing and always returns nil. +func WithShutdown(shutdown component.ShutdownFunc) Option { + return func(o *BaseExporter) error { + o.ShutdownFunc = shutdown + return nil + } +} + +// WithTimeout overrides the default TimeoutConfig for an exporter. +// The default TimeoutConfig is 5 seconds. +func WithTimeout(timeoutConfig TimeoutConfig) Option { + return func(o *BaseExporter) error { + o.TimeoutSender.cfg = timeoutConfig + return nil + } +} + +// WithRetry overrides the default configretry.BackOffConfig for an exporter. +// The default configretry.BackOffConfig is to disable retries. +func WithRetry(config configretry.BackOffConfig) Option { + return func(o *BaseExporter) error { + if !config.Enabled { + o.ExportFailureMessage += " Try enabling retry_on_failure config option to retry on retryable errors." + return nil + } + o.RetrySender = newRetrySender(config, o.Set) + return nil + } +} + +// WithQueue overrides the default QueueConfig for an exporter. +// The default QueueConfig is to disable queueing. +// This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. +func WithQueue(config QueueConfig) Option { + return func(o *BaseExporter) error { + if o.Marshaler == nil || o.Unmarshaler == nil { + return fmt.Errorf("WithQueue option is not available for the new request exporters, use WithRequestQueue instead") + } + if !config.Enabled { + o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures." 
+ return nil + } + qf := exporterqueue.NewPersistentQueueFactory[internal.Request](config.StorageID, exporterqueue.PersistentQueueSettings[internal.Request]{ + Marshaler: o.Marshaler, + Unmarshaler: o.Unmarshaler, + }) + q := qf(context.Background(), exporterqueue.Settings{ + DataType: o.Signal, + ExporterSettings: o.Set, + }, exporterqueue.Config{ + Enabled: config.Enabled, + NumConsumers: config.NumConsumers, + QueueSize: config.QueueSize, + }) + o.QueueSender = NewQueueSender(q, o.Set, config.NumConsumers, o.ExportFailureMessage, o.Obsrep) + return nil + } +} + +// WithRequestQueue enables queueing for an exporter. +// This option should be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Factory[internal.Request]) Option { + return func(o *BaseExporter) error { + if o.Marshaler != nil || o.Unmarshaler != nil { + return fmt.Errorf("WithRequestQueue option must be used with the new request exporters only, use WithQueue instead") + } + if !cfg.Enabled { + o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures." + return nil + } + o.QueueCfg = cfg + o.QueueFactory = queueFactory + return nil + } +} + +// WithCapabilities overrides the default Capabilities() function for a Consumer. +// The default is non-mutable data. +// TODO: Verify if we can change the default to be mutable as we do for processors. 
+func WithCapabilities(capabilities consumer.Capabilities) Option { + return func(o *BaseExporter) error { + o.ConsumerOptions = append(o.ConsumerOptions, consumer.WithCapabilities(capabilities)) + return nil + } +} + +// WithRequestBatchFuncs sets the functions for merging and splitting batches for an exporter built for custom request types. +func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[internal.Request], msf exporterbatcher.BatchMergeSplitFunc[internal.Request]) BatcherOption { + return func(bs *BatchSender) error { + if mf == nil || msf == nil { + return fmt.Errorf("WithRequestBatchFuncs must be provided with non-nil functions") + } + if bs.mergeFunc != nil || bs.mergeSplitFunc != nil { + return fmt.Errorf("WithRequestBatchFuncs can only be used once with request-based exporters") + } + bs.mergeFunc = mf + bs.mergeSplitFunc = msf + return nil + } +} + +// WithBatcher enables batching for an exporter based on custom request types. +// For now, it can be used only with the New[Traces|Metrics|Logs]RequestExporter exporter helpers and +// WithRequestBatchFuncs provided. +// This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func WithBatcher(cfg exporterbatcher.Config, opts ...BatcherOption) Option { + return func(o *BaseExporter) error { + o.BatcherCfg = cfg + o.BatcherOpts = opts + return nil + } +} + +// WithMarshaler is used to set the request marshaler for the new exporter helper. +// It must be provided as the first option when creating a new exporter helper. +func WithMarshaler(marshaler exporterqueue.Marshaler[internal.Request]) Option { + return func(o *BaseExporter) error { + o.Marshaler = marshaler + return nil + } +} + +// withUnmarshaler is used to set the request unmarshaler for the new exporter helper. +// It must be provided as the first option when creating a new exporter helper. 
+func WithUnmarshaler(unmarshaler exporterqueue.Unmarshaler[internal.Request]) Option { + return func(o *BaseExporter) error { + o.Unmarshaler = unmarshaler + return nil + } +} + +// withBatchFuncs is used to set the functions for merging and splitting batches for OLTP-based exporters. +// It must be provided as the first option when creating a new exporter helper. +func WithBatchFuncs(mf exporterbatcher.BatchMergeFunc[internal.Request], msf exporterbatcher.BatchMergeSplitFunc[internal.Request]) Option { + return func(o *BaseExporter) error { + o.BatchMergeFunc = mf + o.BatchMergeSplitfunc = msf + return nil + } +} + +func CheckStatus(t *testing.T, sd sdktrace.ReadOnlySpan, err error) { + if err != nil { + require.Equal(t, codes.Error, sd.Status().Code, "SpanData %v", sd) + require.Equal(t, err.Error(), sd.Status().Description, "SpanData %v", sd) + } else { + require.Equal(t, codes.Unset, sd.Status().Code, "SpanData %v", sd) + } +} diff --git a/exporter/exporterhelper/common_test.go b/exporter/exporterhelper/internal/base_exporter_test.go similarity index 69% rename from exporter/exporterhelper/common_test.go rename to exporter/exporterhelper/internal/base_exporter_test.go index 512233ae1c7..f9c5975a171 100644 --- a/exporter/exporterhelper/common_test.go +++ b/exporter/exporterhelper/internal/base_exporter_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal import ( "context" @@ -9,8 +9,6 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/codes" - sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" @@ -20,6 +18,7 @@ import ( "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/internal" ) var ( @@ -33,12 +32,12 @@ var ( }() ) -func 
newNoopObsrepSender(*obsReport) requestSender { - return &baseRequestSender{} +func newNoopObsrepSender(*ObsReport) RequestSender { + return &BaseRequestSender{} } func TestBaseExporter(t *testing.T) { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender) + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) require.NoError(t, be.Shutdown(context.Background())) @@ -46,7 +45,7 @@ func TestBaseExporter(t *testing.T) { func TestBaseExporterWithOptions(t *testing.T) { want := errors.New("my error") - be, err := newBaseExporter( + be, err := NewBaseExporter( defaultSettings, defaultDataType, newNoopObsrepSender, WithStart(func(context.Context, component.Host) error { return want }), WithShutdown(func(context.Context) error { return want }), @@ -57,29 +56,20 @@ func TestBaseExporterWithOptions(t *testing.T) { require.Equal(t, want, be.Shutdown(context.Background())) } -func checkStatus(t *testing.T, sd sdktrace.ReadOnlySpan, err error) { - if err != nil { - require.Equal(t, codes.Error, sd.Status().Code, "SpanData %v", sd) - require.Equal(t, err.Error(), sd.Status().Description, "SpanData %v", sd) - } else { - require.Equal(t, codes.Unset, sd.Status().Code, "SpanData %v", sd) - } -} - func TestQueueOptionsWithRequestExporter(t *testing.T) { - bs, err := newBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, + bs, err := NewBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, WithRetry(configretry.NewDefaultBackOffConfig())) require.NoError(t, err) - require.Nil(t, bs.marshaler) - require.Nil(t, bs.unmarshaler) - _, err = newBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, + require.Nil(t, bs.Marshaler) + require.Nil(t, bs.Unmarshaler) + _, err = NewBaseExporter(exportertest.NewNopSettings(), defaultDataType, 
newNoopObsrepSender, WithRetry(configretry.NewDefaultBackOffConfig()), WithQueue(NewDefaultQueueConfig())) require.Error(t, err) - _, err = newBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + _, err = NewBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(configretry.NewDefaultBackOffConfig()), - WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[Request]())) + WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.Error(t, err) } @@ -89,9 +79,9 @@ func TestBaseExporterLogging(t *testing.T) { set.Logger = zap.New(logger) rCfg := configretry.NewDefaultBackOffConfig() rCfg.Enabled = false - bs, err := newBaseExporter(set, defaultDataType, newNoopObsrepSender, WithRetry(rCfg)) + bs, err := NewBaseExporter(set, defaultDataType, newNoopObsrepSender, WithRetry(rCfg)) require.NoError(t, err) - sendErr := bs.send(context.Background(), newErrorRequest()) + sendErr := bs.Send(context.Background(), newErrorRequest()) require.Error(t, sendErr) require.Len(t, observed.FilterLevelExact(zap.ErrorLevel).All(), 1) diff --git a/exporter/exporterhelper/batch_sender.go b/exporter/exporterhelper/internal/batch_sender.go similarity index 81% rename from exporter/exporterhelper/batch_sender.go rename to exporter/exporterhelper/internal/batch_sender.go index 4d9635195e2..65d7e0965f7 100644 --- a/exporter/exporterhelper/batch_sender.go +++ b/exporter/exporterhelper/internal/batch_sender.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal // import 
"go.opentelemetry.io/collector/exporter/exporterhelper/internal" import ( "context" @@ -14,18 +14,19 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" ) -// batchSender is a component that places requests into batches before passing them to the downstream senders. +// BatchSender is a component that places requests into batches before passing them to the downstream senders. // Batches are sent out with any of the following conditions: // - batch size reaches cfg.MinSizeItems // - cfg.FlushTimeout is elapsed since the timestamp when the previous batch was sent out. // - concurrencyLimit is reached. -type batchSender struct { - baseRequestSender +type BatchSender struct { + BaseRequestSender cfg exporterbatcher.Config - mergeFunc exporterbatcher.BatchMergeFunc[Request] - mergeSplitFunc exporterbatcher.BatchMergeSplitFunc[Request] + mergeFunc exporterbatcher.BatchMergeFunc[internal.Request] + mergeSplitFunc exporterbatcher.BatchMergeSplitFunc[internal.Request] // concurrencyLimit is the maximum number of goroutines that can be blocked by the batcher. // If this number is reached and all the goroutines are busy, the batch will be sent right away. @@ -45,9 +46,9 @@ type batchSender struct { } // newBatchSender returns a new batch consumer component. 
-func newBatchSender(cfg exporterbatcher.Config, set exporter.Settings, - mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) *batchSender { - bs := &batchSender{ +func NewBatchSender(cfg exporterbatcher.Config, set exporter.Settings, + mf exporterbatcher.BatchMergeFunc[internal.Request], msf exporterbatcher.BatchMergeSplitFunc[internal.Request]) *BatchSender { + bs := &BatchSender{ activeBatch: newEmptyBatch(), cfg: cfg, logger: set.Logger, @@ -60,7 +61,7 @@ func newBatchSender(cfg exporterbatcher.Config, set exporter.Settings, return bs } -func (bs *batchSender) Start(_ context.Context, _ component.Host) error { +func (bs *BatchSender) Start(_ context.Context, _ component.Host) error { bs.shutdownCh = make(chan struct{}) timer := time.NewTimer(bs.cfg.FlushTimeout) go func() { @@ -103,7 +104,7 @@ func (bs *batchSender) Start(_ context.Context, _ component.Host) error { type batch struct { ctx context.Context - request Request + request internal.Request done chan struct{} err error @@ -121,9 +122,9 @@ func newEmptyBatch() *batch { // exportActiveBatch exports the active batch asynchronously and replaces it with a new one. // Caller must hold the lock. -func (bs *batchSender) exportActiveBatch() { +func (bs *BatchSender) exportActiveBatch() { go func(b *batch) { - b.err = bs.nextSender.send(b.ctx, b.request) + b.err = bs.NextSender.Send(b.ctx, b.request) close(b.done) bs.activeRequests.Add(-b.requestsBlocked) }(bs.activeBatch) @@ -134,15 +135,15 @@ func (bs *batchSender) exportActiveBatch() { // isActiveBatchReady returns true if the active batch is ready to be exported. // The batch is ready if it has reached the minimum size or the concurrency limit is reached. // Caller must hold the lock. 
-func (bs *batchSender) isActiveBatchReady() bool { +func (bs *BatchSender) isActiveBatchReady() bool { return bs.activeBatch.request.ItemsCount() >= bs.cfg.MinSizeItems || (bs.concurrencyLimit > 0 && bs.activeRequests.Load() >= bs.concurrencyLimit) } -func (bs *batchSender) send(ctx context.Context, req Request) error { +func (bs *BatchSender) Send(ctx context.Context, req internal.Request) error { // Stopped batch sender should act as pass-through to allow the queue to be drained. if bs.stopped.Load() { - return bs.nextSender.send(ctx, req) + return bs.NextSender.Send(ctx, req) } if bs.cfg.MaxSizeItems > 0 { @@ -152,7 +153,7 @@ func (bs *batchSender) send(ctx context.Context, req Request) error { } // sendMergeSplitBatch sends the request to the batch which may be split into multiple requests. -func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) error { +func (bs *BatchSender) sendMergeSplitBatch(ctx context.Context, req internal.Request) error { bs.mu.Lock() reqs, err := bs.mergeSplitFunc(ctx, bs.cfg.MaxSizeConfig, bs.activeBatch.request, req) @@ -187,7 +188,7 @@ func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) err // Intentionally do not put the last request in the active batch to not block it. // TODO: Consider including the partial request in the error to avoid double publishing. for _, r := range reqs { - if err := bs.nextSender.send(ctx, r); err != nil { + if err := bs.NextSender.Send(ctx, r); err != nil { return err } } @@ -195,7 +196,7 @@ func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) err } // sendMergeBatch sends the request to the batch and waits for the batch to be exported. 
-func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { +func (bs *BatchSender) sendMergeBatch(ctx context.Context, req internal.Request) error { bs.mu.Lock() if bs.activeBatch.request != nil { @@ -223,14 +224,14 @@ func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { // The context is only set once and is not updated after the first call. // Merging the context would be complex and require an additional goroutine to handle the context cancellation. // We take the approach of using the context from the first request since it's likely to have the shortest timeout. -func (bs *batchSender) updateActiveBatch(ctx context.Context, req Request) { +func (bs *BatchSender) updateActiveBatch(ctx context.Context, req internal.Request) { if bs.activeBatch.request == nil { bs.activeBatch.ctx = ctx } bs.activeBatch.request = req } -func (bs *batchSender) Shutdown(context.Context) error { +func (bs *BatchSender) Shutdown(context.Context) error { bs.stopped.Store(true) if bs.shutdownCh != nil { close(bs.shutdownCh) diff --git a/exporter/exporterhelper/batch_sender_test.go b/exporter/exporterhelper/internal/batch_sender_test.go similarity index 83% rename from exporter/exporterhelper/batch_sender_test.go rename to exporter/exporterhelper/internal/batch_sender_test.go index 0c4823c0e8b..53ef9451137 100644 --- a/exporter/exporterhelper/batch_sender_test.go +++ b/exporter/exporterhelper/internal/batch_sender_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal import ( "context" @@ -17,6 +17,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal" ) func TestBatchSender_Merge(t *testing.T) { @@ -55,23 
+56,23 @@ func TestBatchSender_Merge(t *testing.T) { sink := newFakeRequestSink() - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 8, sink: sink})) - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 3, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink})) // the first two requests should be merged into one and sent by reaching the minimum items size assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 11 }, 50*time.Millisecond, 10*time.Millisecond) - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 3, sink: sink})) - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink})) // the third and fifth requests should be sent by reaching the timeout // the fourth request should be ignored because of the merge error. time.Sleep(50 * time.Millisecond) // should be ignored because of the merge error. 
- require.NoError(t, be.send(context.Background(), &fakeRequest{items: 3, sink: sink, + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink, mergeErr: errors.New("merge error")})) assert.Equal(t, uint64(1), sink.requestsCount.Load()) @@ -125,8 +126,8 @@ func TestBatchSender_BatchExportError(t *testing.T) { sink := newFakeRequestSink() - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) // the first two requests should be blocked by the batchSender. time.Sleep(50 * time.Millisecond) @@ -134,14 +135,14 @@ func TestBatchSender_BatchExportError(t *testing.T) { // the third request should trigger the export and cause an error. errReq := &fakeRequest{items: 20, exportErr: errors.New("transient error"), sink: sink} - require.NoError(t, be.send(context.Background(), errReq)) + require.NoError(t, be.Send(context.Background(), errReq)) // the batch should be dropped since the queue doesn't have requeuing enabled. assert.Eventually(t, func() bool { return sink.requestsCount.Load() == tt.expectedRequests && sink.itemsCount.Load() == tt.expectedItems && - be.batchSender.(*batchSender).activeRequests.Load() == 0 && - be.queueSender.(*queueSender).queue.Size() == 0 + be.BatchSender.(*BatchSender).activeRequests.Load() == 0 && + be.QueueSender.(*QueueSender).queue.Size() == 0 }, 100*time.Millisecond, 10*time.Millisecond) }) } @@ -162,24 +163,24 @@ func TestBatchSender_MergeOrSplit(t *testing.T) { sink := newFakeRequestSink() // should be sent right away by reaching the minimum items size. 
- require.NoError(t, be.send(context.Background(), &fakeRequest{items: 8, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 8 }, 50*time.Millisecond, 10*time.Millisecond) // big request should be broken down into two requests, both are sent right away. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 17, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 17, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 3 && sink.itemsCount.Load() == 25 }, 50*time.Millisecond, 10*time.Millisecond) // request that cannot be split should be dropped. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 11, sink: sink, + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 11, sink: sink, mergeErr: errors.New("split error")})) // big request should be broken down into two requests, both are sent right away. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 13, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 13, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 5 && sink.itemsCount.Load() == 38 @@ -194,7 +195,7 @@ func TestBatchSender_Shutdown(t *testing.T) { require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) sink := newFakeRequestSink() - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 3, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink})) // To make the request reached the batchSender before shutdown. 
time.Sleep(50 * time.Millisecond) @@ -210,7 +211,7 @@ func TestBatchSender_Disabled(t *testing.T) { cfg := exporterbatcher.NewDefaultConfig() cfg.Enabled = false cfg.MaxSizeItems = 5 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(cfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) require.NoError(t, err) @@ -222,20 +223,20 @@ func TestBatchSender_Disabled(t *testing.T) { sink := newFakeRequestSink() // should be sent right away without splitting because batching is disabled. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 8, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink})) assert.Equal(t, uint64(1), sink.requestsCount.Load()) assert.Equal(t, uint64(8), sink.itemsCount.Load()) } func TestBatchSender_InvalidMergeSplitFunc(t *testing.T) { - invalidMergeSplitFunc := func(_ context.Context, _ exporterbatcher.MaxSizeConfig, _ Request, req2 Request) ([]Request, + invalidMergeSplitFunc := func(_ context.Context, _ exporterbatcher.MaxSizeConfig, _ internal.Request, req2 internal.Request) ([]internal.Request, error) { // reply with invalid 0 length slice if req2 is more than 20 items if req2.(*fakeRequest).items > 20 { - return []Request{}, nil + return []internal.Request{}, nil } // otherwise reply with a single request. - return []Request{req2}, nil + return []internal.Request{req2}, nil } cfg := exporterbatcher.NewDefaultConfig() cfg.FlushTimeout = 50 * time.Millisecond @@ -249,16 +250,16 @@ func TestBatchSender_InvalidMergeSplitFunc(t *testing.T) { sink := newFakeRequestSink() // first request should be ignored due to invalid merge/split function. 
- require.NoError(t, be.send(context.Background(), &fakeRequest{items: 30, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 30, sink: sink})) // second request should be sent after reaching the timeout. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 15, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 15, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 15 }, 100*time.Millisecond, 10*time.Millisecond) } func TestBatchSender_PostShutdown(t *testing.T) { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(exporterbatcher.NewDefaultConfig(), WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) @@ -268,7 +269,7 @@ func TestBatchSender_PostShutdown(t *testing.T) { // Closed batch sender should act as a pass-through to not block queue draining. 
sink := newFakeRequestSink() - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 8, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink})) assert.Equal(t, uint64(1), sink.requestsCount.Load()) assert.Equal(t, uint64(8), sink.itemsCount.Load()) } @@ -321,9 +322,9 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { t.Run(tt.name, func(t *testing.T) { qCfg := exporterqueue.NewDefaultConfig() qCfg.NumConsumers = 2 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(tt.batcherCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)), - WithRequestQueue(qCfg, exporterqueue.NewMemoryQueueFactory[Request]())) + WithRequestQueue(qCfg, exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.NotNil(t, be) require.NoError(t, err) assert.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -333,29 +334,29 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { sink := newFakeRequestSink() // the 1st and 2nd request should be flushed in the same batched request by max concurrency limit. 
- assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 4 }, 100*time.Millisecond, 10*time.Millisecond) // the 3rd request should be flushed by itself due to flush interval - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 2 && sink.itemsCount.Load() == 6 }, 100*time.Millisecond, 10*time.Millisecond) // the 4th and 5th request should be flushed in the same batched request by max concurrency limit. - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 3 && sink.itemsCount.Load() == 10 }, 100*time.Millisecond, 10*time.Millisecond) // do it a few more times to ensure it produces the correct batch size regardless of goroutine scheduling. 
- assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 5, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 6, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 5, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 6, sink: sink})) if tt.batcherCfg.MaxSizeItems == 10 { // in case of MaxSizeItems=10, wait for the leftover request to send assert.Eventually(t, func() bool { @@ -363,9 +364,9 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { }, 50*time.Millisecond, 10*time.Millisecond) } - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 6, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 20, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 6, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 20, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == tt.expectedRequests && sink.itemsCount.Load() == tt.expectedItems }, 100*time.Millisecond, 10*time.Millisecond) @@ -376,7 +377,7 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { func TestBatchSender_BatchBlocking(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 3 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) require.NoError(t, err) @@ -389,7 +390,7 @@ func TestBatchSender_BatchBlocking(t *testing.T) { for i := 0; i < 6; i++ { wg.Add(1) go func() { - assert.NoError(t, be.send(context.Background(), 
&fakeRequest{items: 1, sink: sink, delay: 10 * time.Millisecond})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 10 * time.Millisecond})) wg.Done() }() } @@ -406,7 +407,7 @@ func TestBatchSender_BatchBlocking(t *testing.T) { func TestBatchSender_BatchCancelled(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 2 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) require.NoError(t, err) @@ -419,13 +420,13 @@ func TestBatchSender_BatchCancelled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) wg.Add(1) go func() { - assert.ErrorIs(t, be.send(ctx, &fakeRequest{items: 1, sink: sink, delay: 100 * time.Millisecond}), context.Canceled) + assert.ErrorIs(t, be.Send(ctx, &fakeRequest{items: 1, sink: sink, delay: 100 * time.Millisecond}), context.Canceled) wg.Done() }() wg.Add(1) go func() { time.Sleep(20 * time.Millisecond) // ensure this call is the second - assert.ErrorIs(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 100 * time.Millisecond}), context.Canceled) + assert.ErrorIs(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 100 * time.Millisecond}), context.Canceled) wg.Done() }() cancel() // canceling the first request should cancel the whole batch @@ -441,7 +442,7 @@ func TestBatchSender_BatchCancelled(t *testing.T) { func TestBatchSender_DrainActiveRequests(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 2 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) 
require.NotNil(t, be) require.NoError(t, err) @@ -451,13 +452,13 @@ func TestBatchSender_DrainActiveRequests(t *testing.T) { // send 3 blocking requests with a timeout go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) }() go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) }() go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) }() // give time for the first two requests to be batched @@ -484,13 +485,13 @@ func TestBatchSender_WithBatcherOption(t *testing.T) { }, { name: "funcs_set_internally", - opts: []Option{withBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig())}, + opts: []Option{WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig())}, expectedErr: false, }, { name: "funcs_set_twice", opts: []Option{ - withBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), + WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig(), WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)), }, @@ -504,7 +505,7 @@ func TestBatchSender_WithBatcherOption(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, tt.opts...) + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, tt.opts...) 
if tt.expectedErr { assert.Nil(t, be) assert.Error(t, err) @@ -517,7 +518,7 @@ func TestBatchSender_WithBatcherOption(t *testing.T) { } func TestBatchSender_UnstartedShutdown(t *testing.T) { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(exporterbatcher.NewDefaultConfig(), WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NoError(t, err) @@ -532,7 +533,7 @@ func TestBatchSender_ShutdownDeadlock(t *testing.T) { waitMerge := make(chan struct{}, 10) // blockedBatchMergeFunc blocks until the blockMerge channel is closed - blockedBatchMergeFunc := func(_ context.Context, r1 Request, r2 Request) (Request, error) { + blockedBatchMergeFunc := func(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { waitMerge <- struct{}{} <-blockMerge r1.(*fakeRequest).items += r2.(*fakeRequest).items @@ -541,7 +542,7 @@ func TestBatchSender_ShutdownDeadlock(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.FlushTimeout = 10 * time.Minute // high timeout to avoid the timeout to trigger - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(blockedBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -549,8 +550,8 @@ func TestBatchSender_ShutdownDeadlock(t *testing.T) { sink := newFakeRequestSink() // Send 2 concurrent requests - go func() { assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() - go func() { assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() + go func() { assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() + go 
func() { assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // Wait for the requests to enter the merge function <-waitMerge @@ -577,7 +578,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { bCfg.MinSizeItems = 10 tCfg := NewDefaultTimeoutConfig() tCfg.Timeout = 50 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)), WithTimeout(tCfg)) require.NoError(t, err) @@ -590,7 +591,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { for i := 0; i < 3; i++ { wg.Add(1) go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) wg.Done() }() } @@ -602,7 +603,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { for i := 0; i < 3; i++ { wg.Add(1) go func() { - assert.Error(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink, delay: 30 * time.Millisecond})) + assert.Error(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink, delay: 30 * time.Millisecond})) wg.Done() }() } @@ -616,7 +617,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { } func TestBatchSenderTimerResetNoConflict(t *testing.T) { - delayBatchMergeFunc := func(_ context.Context, r1 Request, r2 Request) (Request, error) { + delayBatchMergeFunc := func(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { time.Sleep(30 * time.Millisecond) if r1 == nil { return r2, nil @@ -636,7 +637,7 @@ func TestBatchSenderTimerResetNoConflict(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 8 bCfg.FlushTimeout = 50 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := 
NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(delayBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -644,11 +645,11 @@ func TestBatchSenderTimerResetNoConflict(t *testing.T) { // Send 2 concurrent requests that should be merged in one batch in the same interval as the flush timer go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() time.Sleep(30 * time.Millisecond) go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // The batch should be sent either with the flush interval or by reaching the minimum items size with no conflict @@ -667,7 +668,7 @@ func TestBatchSenderTimerFlush(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 8 bCfg.FlushTimeout = 100 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -676,10 +677,10 @@ func TestBatchSenderTimerFlush(t *testing.T) { // Send 2 concurrent requests that should be merged in one batch and sent immediately go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, 
be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() assert.EventuallyWithT(t, func(c *assert.CollectT) { assert.LessOrEqual(c, uint64(1), sink.requestsCount.Load()) @@ -688,7 +689,7 @@ func TestBatchSenderTimerFlush(t *testing.T) { // Send another request that should be flushed after 100ms instead of 50ms since last flush go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // Confirm that it is not flushed in 50ms @@ -703,9 +704,9 @@ func TestBatchSenderTimerFlush(t *testing.T) { require.NoError(t, be.Shutdown(context.Background())) } -func queueBatchExporter(t *testing.T, batchOption Option) *baseExporter { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, batchOption, - WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[Request]())) +func queueBatchExporter(t *testing.T, batchOption Option) *BaseExporter { + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, batchOption, + WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.NotNil(t, be) require.NoError(t, err) return be diff --git a/exporter/exporterhelper/obsexporter.go b/exporter/exporterhelper/internal/obsexporter.go similarity index 52% rename from exporter/exporterhelper/obsexporter.go rename to exporter/exporterhelper/internal/obsexporter.go index 20821d9c779..2bb60f13544 100644 --- a/exporter/exporterhelper/obsexporter.go +++ b/exporter/exporterhelper/internal/obsexporter.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" import ( "context" @@ -13,109 +13,108 @@ 
import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" ) -// obsReport is a helper to add observability to an exporter. -type obsReport struct { +// ObsReport is a helper to add observability to an exporter. +type ObsReport struct { spanNamePrefix string tracer trace.Tracer - dataType component.DataType + DataType component.DataType otelAttrs []attribute.KeyValue - telemetryBuilder *metadata.TelemetryBuilder + TelemetryBuilder *metadata.TelemetryBuilder } -// obsReportSettings are settings for creating an obsReport. -type obsReportSettings struct { - exporterID component.ID - exporterCreateSettings exporter.Settings - dataType component.DataType +// ObsReportSettings are settings for creating an ObsReport. +type ObsReportSettings struct { + ExporterID component.ID + ExporterCreateSettings exporter.Settings + DataType component.DataType } -func newExporter(cfg obsReportSettings) (*obsReport, error) { - telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.exporterCreateSettings.TelemetrySettings) +func NewExporter(cfg ObsReportSettings) (*ObsReport, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ExporterCreateSettings.TelemetrySettings) if err != nil { return nil, err } - return &obsReport{ - spanNamePrefix: internal.ExporterPrefix + cfg.exporterID.String(), - tracer: cfg.exporterCreateSettings.TracerProvider.Tracer(cfg.exporterID.String()), - dataType: cfg.dataType, + return &ObsReport{ + spanNamePrefix: ExporterPrefix + cfg.ExporterID.String(), + tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), + DataType: cfg.DataType, otelAttrs: []attribute.KeyValue{ - attribute.String(internal.ExporterKey, cfg.exporterID.String()), + attribute.String(ExporterKey, cfg.ExporterID.String()), }, - telemetryBuilder: telemetryBuilder, + 
TelemetryBuilder: telemetryBuilder, }, nil } -// startTracesOp is called at the start of an Export operation. +// StartTracesOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (or *obsReport) startTracesOp(ctx context.Context) context.Context { - return or.startOp(ctx, internal.ExportTraceDataOperationSuffix) +func (or *ObsReport) StartTracesOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportTraceDataOperationSuffix) } -// endTracesOp completes the export operation that was started with startTracesOp. -func (or *obsReport) endTracesOp(ctx context.Context, numSpans int, err error) { +// EndTracesOp completes the export operation that was started with StartTracesOp. +func (or *ObsReport) EndTracesOp(ctx context.Context, numSpans int, err error) { numSent, numFailedToSend := toNumItems(numSpans, err) or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeTraces, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, internal.SentSpansKey, internal.FailedToSendSpansKey) + endSpan(ctx, err, numSent, numFailedToSend, SentSpansKey, FailedToSendSpansKey) } -// startMetricsOp is called at the start of an Export operation. +// StartMetricsOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (or *obsReport) startMetricsOp(ctx context.Context) context.Context { - return or.startOp(ctx, internal.ExportMetricsOperationSuffix) +func (or *ObsReport) StartMetricsOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportMetricsOperationSuffix) } -// endMetricsOp completes the export operation that was started with +// EndMetricsOp completes the export operation that was started with // startMetricsOp.
// // If needed, report your use case in https://github.com/open-telemetry/opentelemetry-collector/issues/10592. -func (or *obsReport) endMetricsOp(ctx context.Context, numMetricPoints int, err error) { +func (or *ObsReport) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { numSent, numFailedToSend := toNumItems(numMetricPoints, err) or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeMetrics, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, internal.SentMetricPointsKey, internal.FailedToSendMetricPointsKey) + endSpan(ctx, err, numSent, numFailedToSend, SentMetricPointsKey, FailedToSendMetricPointsKey) } -// startLogsOp is called at the start of an Export operation. +// StartLogsOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (or *obsReport) startLogsOp(ctx context.Context) context.Context { - return or.startOp(ctx, internal.ExportLogsOperationSuffix) +func (or *ObsReport) StartLogsOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportLogsOperationSuffix) } -// endLogsOp completes the export operation that was started with startLogsOp. -func (or *obsReport) endLogsOp(ctx context.Context, numLogRecords int, err error) { +// EndLogsOp completes the export operation that was started with StartLogsOp. +func (or *ObsReport) EndLogsOp(ctx context.Context, numLogRecords int, err error) { numSent, numFailedToSend := toNumItems(numLogRecords, err) or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeLogs, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, internal.SentLogRecordsKey, internal.FailedToSendLogRecordsKey) + endSpan(ctx, err, numSent, numFailedToSend, SentLogRecordsKey, FailedToSendLogRecordsKey) } // startOp creates the span used to trace the operation. Returning // the updated context and the created span.
-func (or *obsReport) startOp(ctx context.Context, operationSuffix string) context.Context { +func (or *ObsReport) startOp(ctx context.Context, operationSuffix string) context.Context { spanName := or.spanNamePrefix + operationSuffix ctx, _ = or.tracer.Start(ctx, spanName) return ctx } -func (or *obsReport) recordMetrics(ctx context.Context, dataType component.DataType, sent, failed int64) { +func (or *ObsReport) recordMetrics(ctx context.Context, dataType component.DataType, sent, failed int64) { var sentMeasure, failedMeasure metric.Int64Counter switch dataType { case component.DataTypeTraces: - sentMeasure = or.telemetryBuilder.ExporterSentSpans - failedMeasure = or.telemetryBuilder.ExporterSendFailedSpans + sentMeasure = or.TelemetryBuilder.ExporterSentSpans + failedMeasure = or.TelemetryBuilder.ExporterSendFailedSpans case component.DataTypeMetrics: - sentMeasure = or.telemetryBuilder.ExporterSentMetricPoints - failedMeasure = or.telemetryBuilder.ExporterSendFailedMetricPoints + sentMeasure = or.TelemetryBuilder.ExporterSentMetricPoints + failedMeasure = or.TelemetryBuilder.ExporterSendFailedMetricPoints case component.DataTypeLogs: - sentMeasure = or.telemetryBuilder.ExporterSentLogRecords - failedMeasure = or.telemetryBuilder.ExporterSendFailedLogRecords + sentMeasure = or.TelemetryBuilder.ExporterSentLogRecords + failedMeasure = or.TelemetryBuilder.ExporterSendFailedLogRecords } sentMeasure.Add(ctx, sent, metric.WithAttributes(or.otelAttrs...)) @@ -144,15 +143,15 @@ func toNumItems(numExportedItems int, err error) (int64, int64) { return int64(numExportedItems), 0 } -func (or *obsReport) recordEnqueueFailure(ctx context.Context, dataType component.DataType, failed int64) { +func (or *ObsReport) RecordEnqueueFailure(ctx context.Context, dataType component.DataType, failed int64) { var enqueueFailedMeasure metric.Int64Counter switch dataType { case component.DataTypeTraces: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedSpans + 
enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedSpans case component.DataTypeMetrics: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedMetricPoints + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedMetricPoints case component.DataTypeLogs: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedLogRecords + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedLogRecords } enqueueFailedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...)) diff --git a/exporter/exporterhelper/obsexporter_test.go b/exporter/exporterhelper/internal/obsexporter_test.go similarity index 73% rename from exporter/exporterhelper/obsexporter_test.go rename to exporter/exporterhelper/internal/obsexporter_test.go index ac939e21127..cafe163581c 100644 --- a/exporter/exporterhelper/obsexporter_test.go +++ b/exporter/exporterhelper/internal/obsexporter_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal import ( "context" @@ -16,7 +16,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) var ( @@ -30,9 +29,9 @@ func TestExportTraceDataOp(t *testing.T) { parentCtx, parentSpan := tt.TelemetrySettings().TracerProvider.Tracer("test").Start(context.Background(), t.Name()) defer parentSpan.End() - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: 
component.NewDefaultBuildInfo()}, }) require.NoError(t, err) @@ -41,9 +40,9 @@ func TestExportTraceDataOp(t *testing.T) { {items: 14, err: errFake}, } for i := range params { - ctx := obsrep.startTracesOp(parentCtx) + ctx := obsrep.StartTracesOp(parentCtx) assert.NotNil(t, ctx) - obsrep.endTracesOp(ctx, params[i].items, params[i].err) + obsrep.EndTracesOp(ctx, params[i].items, params[i].err) } spans := tt.SpanRecorder.Ended() @@ -55,13 +54,13 @@ func TestExportTraceDataOp(t *testing.T) { switch { case params[i].err == nil: sentSpans += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentSpansKey, Value: attribute.Int64Value(int64(params[i].items))}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendSpansKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentSpansKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendSpansKey, Value: attribute.Int64Value(0)}) assert.Equal(t, codes.Unset, span.Status().Code) case errors.Is(params[i].err, errFake): failedToSendSpans += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentSpansKey, Value: attribute.Int64Value(0)}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendSpansKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentSpansKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendSpansKey, Value: attribute.Int64Value(int64(params[i].items))}) assert.Equal(t, codes.Error, span.Status().Code) assert.Equal(t, params[i].err.Error(), span.Status().Description) default: @@ -78,9 +77,9 @@ func TestExportMetricsOp(t *testing.T) { parentCtx, parentSpan := 
tt.TelemetrySettings().TracerProvider.Tracer("test").Start(context.Background(), t.Name()) defer parentSpan.End() - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) @@ -89,10 +88,10 @@ func TestExportMetricsOp(t *testing.T) { {items: 23, err: errFake}, } for i := range params { - ctx := obsrep.startMetricsOp(parentCtx) + ctx := obsrep.StartMetricsOp(parentCtx) assert.NotNil(t, ctx) - obsrep.endMetricsOp(ctx, params[i].items, params[i].err) + obsrep.EndMetricsOp(ctx, params[i].items, params[i].err) } spans := tt.SpanRecorder.Ended() @@ -104,13 +103,13 @@ func TestExportMetricsOp(t *testing.T) { switch { case params[i].err == nil: sentMetricPoints += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentMetricPointsKey, Value: attribute.Int64Value(int64(params[i].items))}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendMetricPointsKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentMetricPointsKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendMetricPointsKey, Value: attribute.Int64Value(0)}) assert.Equal(t, codes.Unset, span.Status().Code) case errors.Is(params[i].err, errFake): failedToSendMetricPoints += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentMetricPointsKey, Value: attribute.Int64Value(0)}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: 
internal.FailedToSendMetricPointsKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentMetricPointsKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendMetricPointsKey, Value: attribute.Int64Value(int64(params[i].items))}) assert.Equal(t, codes.Error, span.Status().Code) assert.Equal(t, params[i].err.Error(), span.Status().Description) default: @@ -127,9 +126,9 @@ func TestExportLogsOp(t *testing.T) { parentCtx, parentSpan := tt.TelemetrySettings().TracerProvider.Tracer("test").Start(context.Background(), t.Name()) defer parentSpan.End() - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) @@ -138,10 +137,10 @@ func TestExportLogsOp(t *testing.T) { {items: 23, err: errFake}, } for i := range params { - ctx := obsrep.startLogsOp(parentCtx) + ctx := obsrep.StartLogsOp(parentCtx) assert.NotNil(t, ctx) - obsrep.endLogsOp(ctx, params[i].items, params[i].err) + obsrep.EndLogsOp(ctx, params[i].items, params[i].err) } spans := tt.SpanRecorder.Ended() @@ -153,13 +152,13 @@ func TestExportLogsOp(t *testing.T) { switch { case params[i].err == nil: sentLogRecords += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentLogRecordsKey, Value: attribute.Int64Value(int64(params[i].items))}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendLogRecordsKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: 
SentLogRecordsKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendLogRecordsKey, Value: attribute.Int64Value(0)}) assert.Equal(t, codes.Unset, span.Status().Code) case errors.Is(params[i].err, errFake): failedToSendLogRecords += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentLogRecordsKey, Value: attribute.Int64Value(0)}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendLogRecordsKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentLogRecordsKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendLogRecordsKey, Value: attribute.Int64Value(int64(params[i].items))}) assert.Equal(t, codes.Error, span.Status().Code) assert.Equal(t, params[i].err.Error(), span.Status().Description) default: @@ -176,14 +175,14 @@ func TestCheckExporterTracesViews(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) - ctx := obsrep.startTracesOp(context.Background()) + ctx := obsrep.StartTracesOp(context.Background()) require.NotNil(t, ctx) - obsrep.endTracesOp(ctx, 7, nil) + obsrep.EndTracesOp(ctx, 7, nil) require.NoError(t, tt.CheckExporterTraces(7, 0)) require.Error(t, tt.CheckExporterTraces(7, 7)) @@ -196,14 +195,14 @@ func TestCheckExporterMetricsViews(t 
*testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) - ctx := obsrep.startMetricsOp(context.Background()) + ctx := obsrep.StartMetricsOp(context.Background()) require.NotNil(t, ctx) - obsrep.endMetricsOp(ctx, 7, nil) + obsrep.EndMetricsOp(ctx, 7, nil) require.NoError(t, tt.CheckExporterMetrics(7, 0)) require.Error(t, tt.CheckExporterMetrics(7, 7)) @@ -216,14 +215,14 @@ func TestCheckExporterLogsViews(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) - ctx := obsrep.startLogsOp(context.Background()) + ctx := obsrep.StartLogsOp(context.Background()) require.NotNil(t, ctx) - obsrep.endLogsOp(ctx, 7, nil) + obsrep.EndLogsOp(ctx, 7, nil) require.NoError(t, tt.CheckExporterLogs(7, 0)) require.Error(t, tt.CheckExporterLogs(7, 7)) diff --git a/exporter/exporterhelper/internal/queue_sender.go b/exporter/exporterhelper/internal/queue_sender.go new file mode 100644 index 00000000000..60a94966336 --- /dev/null +++ 
b/exporter/exporterhelper/internal/queue_sender.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/multierr" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/queue" +) + +const defaultQueueSize = 1000 + +// Deprecated: [v0.110.0] Use QueueConfig instead. +type QueueSettings = QueueConfig + +// QueueConfig defines configuration for queueing batches before sending to the consumerSender. +type QueueConfig struct { + // Enabled indicates whether to not enqueue batches before sending to the consumerSender. + Enabled bool `mapstructure:"enabled"` + // NumConsumers is the number of consumers from the queue. Defaults to 10. + // If batching is enabled, a combined batch cannot contain more requests than the number of consumers. + // So it's recommended to set higher number of consumers if batching is enabled. + NumConsumers int `mapstructure:"num_consumers"` + // QueueSize is the maximum number of batches allowed in queue at a given time. + QueueSize int `mapstructure:"queue_size"` + // StorageID if not empty, enables the persistent storage and uses the component specified + // as a storage extension for the persistent queue + StorageID *component.ID `mapstructure:"storage"` +} + +// Deprecated: [v0.110.0] Use NewDefaultQueueConfig instead. +func NewDefaultQueueSettings() QueueSettings { + return NewDefaultQueueConfig() +} + +// NewDefaultQueueConfig returns the default config for QueueConfig. 
+func NewDefaultQueueConfig() QueueConfig { + return QueueConfig{ + Enabled: true, + NumConsumers: 10, + // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue + // This can be estimated at 1-4 GB worth of maximum memory usage + // This default is probably still too high, and may be adjusted further down in a future release + QueueSize: defaultQueueSize, + } +} + +// Validate checks if the QueueConfig configuration is valid +func (qCfg *QueueConfig) Validate() error { + if !qCfg.Enabled { + return nil + } + + if qCfg.QueueSize <= 0 { + return errors.New("queue size must be positive") + } + + if qCfg.NumConsumers <= 0 { + return errors.New("number of queue consumers must be positive") + } + + return nil +} + +type QueueSender struct { + BaseRequestSender + queue exporterqueue.Queue[internal.Request] + numConsumers int + traceAttribute attribute.KeyValue + consumers *queue.Consumers[internal.Request] + + obsrep *ObsReport + exporterID component.ID +} + +func NewQueueSender(q exporterqueue.Queue[internal.Request], set exporter.Settings, numConsumers int, + exportFailureMessage string, obsrep *ObsReport) *QueueSender { + qs := &QueueSender{ + queue: q, + numConsumers: numConsumers, + traceAttribute: attribute.String(ExporterKey, set.ID.String()), + obsrep: obsrep, + exporterID: set.ID, + } + consumeFunc := func(ctx context.Context, req internal.Request) error { + err := qs.NextSender.Send(ctx, req) + if err != nil { + set.Logger.Error("Exporting failed. Dropping data."+exportFailureMessage, + zap.Error(err), zap.Int("dropped_items", req.ItemsCount())) + } + return err + } + qs.consumers = queue.NewQueueConsumers[internal.Request](q, numConsumers, consumeFunc) + return qs +} + +// Start is invoked during service startup. 
+func (qs *QueueSender) Start(ctx context.Context, host component.Host) error { + if err := qs.consumers.Start(ctx, host); err != nil { + return err + } + + dataTypeAttr := attribute.String(DataTypeKey, qs.obsrep.DataType.String()) + return multierr.Append( + qs.obsrep.TelemetryBuilder.InitExporterQueueSize(func() int64 { return int64(qs.queue.Size()) }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute, dataTypeAttr))), + qs.obsrep.TelemetryBuilder.InitExporterQueueCapacity(func() int64 { return int64(qs.queue.Capacity()) }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute))), + ) +} + +// Shutdown is invoked during service shutdown. +func (qs *QueueSender) Shutdown(ctx context.Context) error { + // Stop the queue and consumers, this will drain the queue and will call the retry (which is stopped) that will only + // try once every request. + return qs.consumers.Shutdown(ctx) +} + +// Send implements the requestSender interface. It puts the request in the queue. +func (qs *QueueSender) Send(ctx context.Context, req internal.Request) error { + // Prevent cancellation and deadline to propagate to the context stored in the queue. + // The grpc/http based receivers will cancel the request context after this function returns.
+ c := context.WithoutCancel(ctx) + + span := trace.SpanFromContext(c) + if err := qs.queue.Offer(c, req); err != nil { + span.AddEvent("Failed to enqueue item.", trace.WithAttributes(qs.traceAttribute)) + return err + } + + span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) + return nil +} + +type MockHost struct { + component.Host + Ext map[component.ID]component.Component +} + +func (nh *MockHost) GetExtensions() map[component.ID]component.Component { + return nh.Ext +} diff --git a/exporter/exporterhelper/queue_sender_test.go b/exporter/exporterhelper/internal/queue_sender_test.go similarity index 77% rename from exporter/exporterhelper/queue_sender_test.go rename to exporter/exporterhelper/internal/queue_sender_test.go index f86b5d2aad1..a9dd5ab7cea 100644 --- a/exporter/exporterhelper/queue_sender_test.go +++ b/exporter/exporterhelper/internal/queue_sender_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal import ( "context" @@ -19,9 +19,9 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/internal" "go.opentelemetry.io/collector/exporter/internal/queue" ) @@ -29,45 +29,45 @@ func TestQueuedRetry_StopWhileWaiting(t *testing.T) { qCfg := NewDefaultQueueConfig() qCfg.NumConsumers = 1 rCfg := configretry.NewDefaultBackOffConfig() - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + 
WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) firstMockR := newErrorRequest() ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(context.Background(), firstMockR)) + require.NoError(t, be.Send(context.Background(), firstMockR)) }) // Enqueue another request to ensure when calling shutdown we drain the queue. secondMockR := newMockRequest(3, nil) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(context.Background(), secondMockR)) + require.NoError(t, be.Send(context.Background(), secondMockR)) }) - require.LessOrEqual(t, 1, be.queueSender.(*queueSender).queue.Size()) + require.LessOrEqual(t, 1, be.QueueSender.(*QueueSender).queue.Size()) require.NoError(t, be.Shutdown(context.Background())) secondMockR.checkNumRequests(t, 1) ocs.checkSendItemsCount(t, 3) ocs.checkDroppedItemsCount(t, 7) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } func TestQueuedRetry_DoNotPreserveCancellation(t *testing.T) { qCfg := NewDefaultQueueConfig() qCfg.NumConsumers = 1 rCfg := configretry.NewDefaultBackOffConfig() - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := 
be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -78,14 +78,14 @@ func TestQueuedRetry_DoNotPreserveCancellation(t *testing.T) { mockR := newMockRequest(2, nil) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(ctx, mockR)) + require.NoError(t, be.Send(ctx, mockR)) }) ocs.awaitAsyncProcessing() mockR.checkNumRequests(t, 1) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 0) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } func TestQueuedRetry_RejectOnFull(t *testing.T) { @@ -95,15 +95,15 @@ func TestQueuedRetry_RejectOnFull(t *testing.T) { set := exportertest.NewNopSettings() logger, observed := observer.New(zap.ErrorLevel) set.Logger = zap.New(logger) - be, err := newBaseExporter(set, defaultDataType, newNoopObsrepSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(set, defaultDataType, newNoopObsrepSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithQueue(qCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) }) - require.Error(t, be.send(context.Background(), newMockRequest(2, nil))) + require.Error(t, be.Send(context.Background(), newMockRequest(2, nil))) assert.Len(t, observed.All(), 1) assert.Equal(t, "Exporting failed. 
Rejecting data.", observed.All()[0].Message) assert.Equal(t, "sending queue is full", observed.All()[0].ContextMap()["error"]) @@ -117,8 +117,8 @@ func TestQueuedRetryHappyPath(t *testing.T) { { name: "WithQueue", queueOptions: []Option{ - withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithQueue(QueueConfig{ Enabled: true, QueueSize: 10, @@ -134,7 +134,7 @@ func TestQueuedRetryHappyPath(t *testing.T) { Enabled: true, QueueSize: 10, NumConsumers: 1, - }, exporterqueue.NewMemoryQueueFactory[Request]()), + }, exporterqueue.NewMemoryQueueFactory[internal.Request]()), WithRetry(configretry.NewDefaultBackOffConfig()), }, }, @@ -145,7 +145,7 @@ func TestQueuedRetryHappyPath(t *testing.T) { Enabled: true, QueueSize: 10, NumConsumers: 1, - }, exporterqueue.NewPersistentQueueFactory[Request](nil, exporterqueue.PersistentQueueSettings[Request]{})), + }, exporterqueue.NewPersistentQueueFactory[internal.Request](nil, exporterqueue.PersistentQueueSettings[internal.Request]{})), WithRetry(configretry.NewDefaultBackOffConfig()), }, }, @@ -156,7 +156,7 @@ func TestQueuedRetryHappyPath(t *testing.T) { Enabled: true, QueueSize: 10, NumConsumers: 1, - }, exporterqueue.NewPersistentQueueFactory[Request](nil, exporterqueue.PersistentQueueSettings[Request]{})), + }, exporterqueue.NewPersistentQueueFactory[internal.Request](nil, exporterqueue.PersistentQueueSettings[internal.Request]{})), WithRetry(configretry.NewDefaultBackOffConfig()), }, }, @@ -168,9 +168,9 @@ func TestQueuedRetryHappyPath(t *testing.T) { t.Cleanup(func() { require.NoError(t, tel.Shutdown(context.Background())) }) set := exporter.Settings{ID: defaultID, TelemetrySettings: tel.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, defaultDataType, newObservabilityConsumerSender, tt.queueOptions...) 
+ be, err := NewBaseExporter(set, defaultDataType, newObservabilityConsumerSender, tt.queueOptions...) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) wantRequests := 10 reqs := make([]*mockRequest, 0, 10) @@ -178,12 +178,12 @@ func TestQueuedRetryHappyPath(t *testing.T) { ocs.run(func() { req := newMockRequest(2, nil) reqs = append(reqs, req) - require.NoError(t, be.send(context.Background(), req)) + require.NoError(t, be.Send(context.Background(), req)) }) } // expect queue to be full - require.Error(t, be.send(context.Background(), newMockRequest(2, nil))) + require.Error(t, be.Send(context.Background(), newMockRequest(2, nil))) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { @@ -214,8 +214,8 @@ func TestQueuedRetry_QueueMetricsReported(t *testing.T) { qCfg.NumConsumers = 0 // to make every request go straight to the queue rCfg := configretry.NewDefaultBackOffConfig() set := exporter.Settings{ID: defaultID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, dataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(set, dataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -223,10 +223,10 @@ func TestQueuedRetry_QueueMetricsReported(t *testing.T) { require.NoError(t, tt.CheckExporterMetricGauge("otelcol_exporter_queue_capacity", int64(defaultQueueSize))) for i := 0; i < 7; i++ { - require.NoError(t, be.send(context.Background(), newErrorRequest())) + require.NoError(t, be.Send(context.Background(), newErrorRequest())) } 
require.NoError(t, tt.CheckExporterMetricGauge("otelcol_exporter_queue_size", int64(7), - attribute.String(internal.DataTypeKey, dataType.String()))) + attribute.String(DataTypeKey, dataType.String()))) assert.NoError(t, be.Shutdown(context.Background())) } @@ -273,8 +273,8 @@ func TestQueueRetryWithDisabledQueue(t *testing.T) { { name: "WithQueue", queueOptions: []Option{ - withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), func() Option { qs := NewDefaultQueueConfig() qs.Enabled = false @@ -288,7 +288,7 @@ func TestQueueRetryWithDisabledQueue(t *testing.T) { func() Option { qs := exporterqueue.NewDefaultConfig() qs.Enabled = false - return WithRequestQueue(qs, exporterqueue.NewMemoryQueueFactory[Request]()) + return WithRequestQueue(qs, exporterqueue.NewMemoryQueueFactory[internal.Request]()) }(), }, }, @@ -299,13 +299,13 @@ func TestQueueRetryWithDisabledQueue(t *testing.T) { set := exportertest.NewNopSettings() logger, observed := observer.New(zap.ErrorLevel) set.Logger = zap.New(logger) - be, err := newBaseExporter(set, component.DataTypeLogs, newObservabilityConsumerSender, tt.queueOptions...) + be, err := NewBaseExporter(set, component.DataTypeLogs, newObservabilityConsumerSender, tt.queueOptions...) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) mockR := newMockRequest(2, errors.New("some error")) ocs.run(func() { - require.Error(t, be.send(context.Background(), mockR)) + require.Error(t, be.Send(context.Background(), mockR)) }) assert.Len(t, observed.All(), 1) assert.Equal(t, "Exporting failed. Rejecting data. 
Try enabling sending_queue to survive temporary failures.", observed.All()[0].Message) @@ -323,12 +323,12 @@ func TestQueueFailedRequestDropped(t *testing.T) { set := exportertest.NewNopSettings() logger, observed := observer.New(zap.ErrorLevel) set.Logger = zap.New(logger) - be, err := newBaseExporter(set, component.DataTypeLogs, newNoopObsrepSender, - WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[Request]())) + be, err := NewBaseExporter(set, component.DataTypeLogs, newNoopObsrepSender, + WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) mockR := newMockRequest(2, errors.New("some error")) - require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) require.NoError(t, be.Shutdown(context.Background())) mockR.checkNumRequests(t, 1) assert.Len(t, observed.All(), 1) @@ -345,15 +345,15 @@ func TestQueuedRetryPersistenceEnabled(t *testing.T) { qCfg.StorageID = &storageID // enable persistence rCfg := configretry.NewDefaultBackOffConfig() set := exporter.Settings{ID: defaultID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(set, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) var extensions = map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), } - host := &mockHost{ext: extensions} + host := &MockHost{Ext: extensions} // we start correctly with a file storage extension 
require.NoError(t, be.Start(context.Background(), host)) @@ -371,14 +371,14 @@ func TestQueuedRetryPersistenceEnabledStorageError(t *testing.T) { qCfg.StorageID = &storageID // enable persistence rCfg := configretry.NewDefaultBackOffConfig() set := exporter.Settings{ID: defaultID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, defaultDataType, newObservabilityConsumerSender, withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) + be, err := NewBaseExporter(set, defaultDataType, newObservabilityConsumerSender, WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) var extensions = map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(storageError), } - host := &mockHost{ext: extensions} + host := &MockHost{Ext: extensions} // we fail to start if we get an error creating the storage client require.Error(t, be.Start(context.Background(), host), "could not get storage client") @@ -395,23 +395,23 @@ func TestQueuedRetryPersistentEnabled_NoDataLossOnShutdown(t *testing.T) { rCfg.MaxElapsedTime = 0 // retry infinitely so shutdown can be triggered mockReq := newErrorRequest() - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(mockReq)), WithRetry(rCfg), WithQueue(qCfg)) + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(mockReq)), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) var extensions = map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), } - host := &mockHost{ext: extensions} + host := &MockHost{Ext: extensions} require.NoError(t, 
be.Start(context.Background(), host)) // Invoke queuedRetrySender so the producer will put the item for consumer to poll - require.NoError(t, be.send(context.Background(), mockReq)) + require.NoError(t, be.Send(context.Background(), mockReq)) // first wait for the item to be consumed from the queue assert.Eventually(t, func() bool { - return be.queueSender.(*queueSender).queue.Size() == 0 + return be.QueueSender.(*QueueSender).queue.Size() == 0 }, time.Second, 1*time.Millisecond) // shuts down the exporter, unsent data should be preserved as in-flight data in the persistent queue. @@ -419,8 +419,8 @@ func TestQueuedRetryPersistentEnabled_NoDataLossOnShutdown(t *testing.T) { // start the exporter again replacing the preserved mockRequest in the unmarshaler with a new one that doesn't fail. replacedReq := newMockRequest(1, nil) - be, err = newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(replacedReq)), WithRetry(rCfg), WithQueue(qCfg)) + be, err = NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(replacedReq)), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), host)) t.Cleanup(func() { require.NoError(t, be.Shutdown(context.Background())) }) @@ -430,22 +430,13 @@ func TestQueuedRetryPersistentEnabled_NoDataLossOnShutdown(t *testing.T) { } func TestQueueSenderNoStartShutdown(t *testing.T) { - queue := queue.NewBoundedMemoryQueue[Request](queue.MemoryQueueSettings[Request]{}) + queue := queue.NewBoundedMemoryQueue[internal.Request](queue.MemoryQueueSettings[internal.Request]{}) set := exportertest.NewNopSettings() - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exportertest.NewNopSettings(), + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + 
ExporterCreateSettings: exportertest.NewNopSettings(), }) require.NoError(t, err) - qs := newQueueSender(queue, set, 1, "", obsrep) + qs := NewQueueSender(queue, set, 1, "", obsrep) assert.NoError(t, qs.Shutdown(context.Background())) } - -type mockHost struct { - component.Host - ext map[component.ID]component.Component -} - -func (nh *mockHost) GetExtensions() map[component.ID]component.Component { - return nh.ext -} diff --git a/exporter/exporterhelper/request_test.go b/exporter/exporterhelper/internal/request.go similarity index 71% rename from exporter/exporterhelper/request_test.go rename to exporter/exporterhelper/internal/request.go index fe373c67e12..79f58a82608 100644 --- a/exporter/exporterhelper/request_test.go +++ b/exporter/exporterhelper/internal/request.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" import ( "context" @@ -9,6 +9,7 @@ import ( "time" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" @@ -54,7 +55,7 @@ func (r *fakeRequest) ItemsCount() int { return r.items } -func fakeBatchMergeFunc(_ context.Context, r1 Request, r2 Request) (Request, error) { +func fakeBatchMergeFunc(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { if r1 == nil { return r2, nil } @@ -71,11 +72,11 @@ func fakeBatchMergeFunc(_ context.Context, r1 Request, r2 Request) (Request, err }, nil } -func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, r1 internal.Request, r2 internal.Request) ([]internal.Request, error) { 
maxItems := cfg.MaxSizeItems if maxItems == 0 { r, err := fakeBatchMergeFunc(ctx, r1, r2) - return []Request{r}, err + return []internal.Request{r}, err } if r2.(*fakeRequest).mergeErr != nil { @@ -84,7 +85,7 @@ func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeCon fr2 := r2.(*fakeRequest) fr2 = &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay} - var res []Request + var res []internal.Request // fill fr1 to maxItems if it's not nil if r1 != nil { @@ -95,7 +96,7 @@ func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeCon if fr2.exportErr != nil { fr1.exportErr = fr2.exportErr } - return []Request{fr1}, nil + return []internal.Request{fr1}, nil } // if split is needed, we don't propagate exportErr from fr2 to fr1 to test more cases fr2.items -= maxItems - fr1.items @@ -116,21 +117,21 @@ func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeCon return res, nil } -type fakeRequestConverter struct { - metricsError error - tracesError error - logsError error - requestError error +type FakeRequestConverter struct { + MetricsError error + TracesError error + LogsError error + RequestError error } -func (frc *fakeRequestConverter) requestFromMetricsFunc(_ context.Context, md pmetric.Metrics) (Request, error) { - return &fakeRequest{items: md.DataPointCount(), exportErr: frc.requestError}, frc.metricsError +func (frc *FakeRequestConverter) RequestFromMetricsFunc(_ context.Context, md pmetric.Metrics) (internal.Request, error) { + return &fakeRequest{items: md.DataPointCount(), exportErr: frc.RequestError}, frc.MetricsError } -func (frc *fakeRequestConverter) requestFromTracesFunc(_ context.Context, md ptrace.Traces) (Request, error) { - return &fakeRequest{items: md.SpanCount(), exportErr: frc.requestError}, frc.tracesError +func (frc *FakeRequestConverter) RequestFromTracesFunc(_ context.Context, md ptrace.Traces) (internal.Request, error) { + return 
&fakeRequest{items: md.SpanCount(), exportErr: frc.RequestError}, frc.TracesError } -func (frc *fakeRequestConverter) requestFromLogsFunc(_ context.Context, md plog.Logs) (Request, error) { - return &fakeRequest{items: md.LogRecordCount(), exportErr: frc.requestError}, frc.logsError +func (frc *FakeRequestConverter) RequestFromLogsFunc(_ context.Context, md plog.Logs) (internal.Request, error) { + return &fakeRequest{items: md.LogRecordCount(), exportErr: frc.RequestError}, frc.LogsError } diff --git a/exporter/exporterhelper/internal/request_sender.go b/exporter/exporterhelper/internal/request_sender.go new file mode 100644 index 00000000000..683aca40d79 --- /dev/null +++ b/exporter/exporterhelper/internal/request_sender.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" // RequestSender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs). 
+ + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/internal" +) + +type RequestSender interface { + component.Component + Send(context.Context, internal.Request) error + SetNextSender(nextSender RequestSender) +} + +type BaseRequestSender struct { + component.StartFunc + component.ShutdownFunc + NextSender RequestSender +} + +var _ RequestSender = (*BaseRequestSender)(nil) + +func (b *BaseRequestSender) Send(ctx context.Context, req internal.Request) error { + return b.NextSender.Send(ctx, req) +} + +func (b *BaseRequestSender) SetNextSender(nextSender RequestSender) { + b.NextSender = nextSender +} diff --git a/exporter/exporterhelper/internal/retry_sender.go b/exporter/exporterhelper/internal/retry_sender.go new file mode 100644 index 00000000000..c6648785183 --- /dev/null +++ b/exporter/exporterhelper/internal/retry_sender.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/experr" +) + +// TODO: Clean this by forcing all exporters to return an internal error type that always include the information about retries. +type throttleRetry struct { + err error + delay time.Duration +} + +func (t throttleRetry) Error() string { + return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() +} + +func (t throttleRetry) Unwrap() error { + return t.err +} + +// NewThrottleRetry creates a new throttle retry error. 
+func NewThrottleRetry(err error, delay time.Duration) error { + return throttleRetry{ + err: err, + delay: delay, + } +} + +type retrySender struct { + BaseRequestSender + traceAttribute attribute.KeyValue + cfg configretry.BackOffConfig + stopCh chan struct{} + logger *zap.Logger +} + +func newRetrySender(config configretry.BackOffConfig, set exporter.Settings) *retrySender { + return &retrySender{ + traceAttribute: attribute.String(ExporterKey, set.ID.String()), + cfg: config, + stopCh: make(chan struct{}), + logger: set.Logger, + } +} + +func (rs *retrySender) Shutdown(context.Context) error { + close(rs.stopCh) + return nil +} + +// send implements the requestSender interface +func (rs *retrySender) Send(ctx context.Context, req internal.Request) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here must + // call Reset after changing the InitialInterval (this saves an unnecessary call to Now). + expBackoff := backoff.ExponentialBackOff{ + InitialInterval: rs.cfg.InitialInterval, + RandomizationFactor: rs.cfg.RandomizationFactor, + Multiplier: rs.cfg.Multiplier, + MaxInterval: rs.cfg.MaxInterval, + MaxElapsedTime: rs.cfg.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + expBackoff.Reset() + span := trace.SpanFromContext(ctx) + retryNum := int64(0) + for { + span.AddEvent( + "Sending request.", + trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) + + err := rs.NextSender.Send(ctx, req) + if err == nil { + return nil + } + + // Immediately drop data on permanent errors. 
+ if consumererror.IsPermanent(err) { + return fmt.Errorf("not retryable error: %w", err) + } + + req = internal.ExtractPartialRequest(req, err) + + backoffDelay := expBackoff.NextBackOff() + if backoffDelay == backoff.Stop { + return fmt.Errorf("no more retries left: %w", err) + } + + throttleErr := throttleRetry{} + if errors.As(err, &throttleErr) { + backoffDelay = max(backoffDelay, throttleErr.delay) + } + + backoffDelayStr := backoffDelay.String() + span.AddEvent( + "Exporting failed. Will retry the request after interval.", + trace.WithAttributes( + rs.traceAttribute, + attribute.String("interval", backoffDelayStr), + attribute.String("error", err.Error()))) + rs.logger.Info( + "Exporting failed. Will retry the request after interval.", + zap.Error(err), + zap.String("interval", backoffDelayStr), + ) + retryNum++ + + // back-off, but get interrupted when shutting down or request is cancelled or timed out. + select { + case <-ctx.Done(): + return fmt.Errorf("request is cancelled or timed out %w", err) + case <-rs.stopCh: + return experr.NewShutdownErr(err) + case <-time.After(backoffDelay): + } + } +} + +// max returns the larger of x or y. 
+func max(x, y time.Duration) time.Duration { + if x < y { + return y + } + return x +} diff --git a/exporter/exporterhelper/retry_sender_test.go b/exporter/exporterhelper/internal/retry_sender_test.go similarity index 78% rename from exporter/exporterhelper/retry_sender_test.go rename to exporter/exporterhelper/internal/retry_sender_test.go index a0dab1f8782..f4cc0f5ee0b 100644 --- a/exporter/exporterhelper/retry_sender_test.go +++ b/exporter/exporterhelper/internal/retry_sender_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal import ( "context" @@ -22,16 +22,17 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/internal" "go.opentelemetry.io/collector/pdata/testdata" ) -func mockRequestUnmarshaler(mr Request) exporterqueue.Unmarshaler[Request] { - return func([]byte) (Request, error) { +func mockRequestUnmarshaler(mr internal.Request) exporterqueue.Unmarshaler[internal.Request] { + return func([]byte) (internal.Request, error) { return mr, nil } } -func mockRequestMarshaler(Request) ([]byte, error) { +func mockRequestMarshaler(internal.Request) ([]byte, error) { return []byte("mockRequest"), nil } @@ -39,10 +40,10 @@ func TestQueuedRetry_DropOnPermanentError(t *testing.T) { qCfg := NewDefaultQueueConfig() rCfg := configretry.NewDefaultBackOffConfig() mockR := newMockRequest(2, consumererror.NewPermanent(errors.New("bad data"))) - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(mockR)), WithRetry(rCfg), WithQueue(qCfg)) + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), 
WithUnmarshaler(mockRequestUnmarshaler(mockR)), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -50,7 +51,7 @@ func TestQueuedRetry_DropOnPermanentError(t *testing.T) { ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() // In the newMockConcurrentExporter we count requests and items even for failed requests @@ -63,11 +64,11 @@ func TestQueuedRetry_DropOnNoRetry(t *testing.T) { qCfg := NewDefaultQueueConfig() rCfg := configretry.NewDefaultBackOffConfig() rCfg.Enabled = false - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(newMockRequest(2, errors.New("transient error")))), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(newMockRequest(2, errors.New("transient error")))), WithQueue(qCfg), WithRetry(rCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -76,7 +77,7 @@ func TestQueuedRetry_DropOnNoRetry(t *testing.T) { mockR := newMockRequest(2, errors.New("transient error")) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() // In the newMockConcurrentExporter we count requests and items even for failed requests @@ -90,8 +91,8 @@ func TestQueuedRetry_OnError(t *testing.T) { qCfg.NumConsumers = 1 rCfg := configretry.NewDefaultBackOffConfig() rCfg.InitialInterval = 0 - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -101,10 +102,10 @@ func TestQueuedRetry_OnError(t *testing.T) { traceErr := consumererror.NewTraces(errors.New("some error"), testdata.GenerateTraces(1)) mockR := newMockRequest(2, traceErr) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() @@ -120,11 +121,11 @@ func TestQueuedRetry_MaxElapsedTime(t *testing.T) { rCfg := configretry.NewDefaultBackOffConfig() rCfg.InitialInterval = time.Millisecond rCfg.MaxElapsedTime = 100 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -132,14 +133,14 @@ func TestQueuedRetry_MaxElapsedTime(t *testing.T) { ocs.run(func() { // Add an item that will always fail. - require.NoError(t, be.send(context.Background(), newErrorRequest())) + require.NoError(t, be.Send(context.Background(), newErrorRequest())) }) mockR := newMockRequest(2, nil) start := time.Now() ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() @@ -152,7 +153,7 @@ func TestQueuedRetry_MaxElapsedTime(t *testing.T) { mockR.checkNumRequests(t, 1) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 7) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } type wrappedError struct { @@ -168,11 +169,11 @@ func TestQueuedRetry_ThrottleError(t *testing.T) { qCfg.NumConsumers = 1 rCfg := configretry.NewDefaultBackOffConfig() rCfg.InitialInterval = 10 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -183,7 +184,7 @@ func TestQueuedRetry_ThrottleError(t *testing.T) { start := time.Now() ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() @@ -193,7 +194,7 @@ func TestQueuedRetry_ThrottleError(t *testing.T) { mockR.checkNumRequests(t, 2) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 0) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } func TestQueuedRetry_RetryOnError(t *testing.T) { @@ -202,11 +203,11 @@ func TestQueuedRetry_RetryOnError(t *testing.T) { qCfg.QueueSize = 1 rCfg := configretry.NewDefaultBackOffConfig() rCfg.InitialInterval = 0 - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -215,7 +216,7 @@ func TestQueuedRetry_RetryOnError(t *testing.T) { mockR := newMockRequest(2, errors.New("transient error")) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() @@ -223,19 +224,19 @@ func TestQueuedRetry_RetryOnError(t *testing.T) { mockR.checkNumRequests(t, 2) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 0) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } func TestQueueRetryWithNoQueue(t *testing.T) { rCfg := configretry.NewDefaultBackOffConfig() rCfg.MaxElapsedTime = time.Nanosecond // fail fast - be, err := newBaseExporter(exportertest.NewNopSettings(), component.DataTypeLogs, newObservabilityConsumerSender, WithRetry(rCfg)) + be, err := NewBaseExporter(exportertest.NewNopSettings(), component.DataTypeLogs, newObservabilityConsumerSender, WithRetry(rCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) mockR := newMockRequest(2, errors.New("some error")) ocs.run(func() { - require.Error(t, be.send(context.Background(), mockR)) + require.Error(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() mockR.checkNumRequests(t, 1) @@ -250,13 +251,13 @@ func TestQueueRetryWithDisabledRetires(t *testing.T) { set := exportertest.NewNopSettings() logger, observed := observer.New(zap.ErrorLevel) set.Logger = zap.New(logger) - be, err := newBaseExporter(set, component.DataTypeLogs, newObservabilityConsumerSender, WithRetry(rCfg)) + be, err := NewBaseExporter(set, component.DataTypeLogs, newObservabilityConsumerSender, WithRetry(rCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) mockR := newMockRequest(2, errors.New("some error")) ocs.run(func() { - 
require.Error(t, be.send(context.Background(), mockR)) + require.Error(t, be.Send(context.Background(), mockR)) }) assert.Len(t, observed.All(), 1) assert.Equal(t, "Exporting failed. Rejecting data. "+ @@ -274,7 +275,7 @@ func (mer *mockErrorRequest) Export(context.Context) error { return errors.New("transient error") } -func (mer *mockErrorRequest) OnError(error) Request { +func (mer *mockErrorRequest) OnError(error) internal.Request { return mer } @@ -282,7 +283,7 @@ func (mer *mockErrorRequest) ItemsCount() int { return 7 } -func newErrorRequest() Request { +func newErrorRequest() internal.Request { return &mockErrorRequest{} } @@ -306,7 +307,7 @@ func (m *mockRequest) Export(ctx context.Context) error { return ctx.Err() } -func (m *mockRequest) OnError(error) Request { +func (m *mockRequest) OnError(error) internal.Request { return &mockRequest{ cnt: 1, consumeError: nil, @@ -333,13 +334,13 @@ func newMockRequest(cnt int, consumeError error) *mockRequest { } type observabilityConsumerSender struct { - baseRequestSender + BaseRequestSender waitGroup *sync.WaitGroup sentItemsCount *atomic.Int64 droppedItemsCount *atomic.Int64 } -func newObservabilityConsumerSender(*obsReport) requestSender { +func newObservabilityConsumerSender(*ObsReport) RequestSender { return &observabilityConsumerSender{ waitGroup: new(sync.WaitGroup), droppedItemsCount: &atomic.Int64{}, @@ -347,8 +348,8 @@ func newObservabilityConsumerSender(*obsReport) requestSender { } } -func (ocs *observabilityConsumerSender) send(ctx context.Context, req Request) error { - err := ocs.nextSender.send(ctx, req) +func (ocs *observabilityConsumerSender) Send(ctx context.Context, req internal.Request) error { + err := ocs.NextSender.Send(ctx, req) if err != nil { ocs.droppedItemsCount.Add(int64(req.ItemsCount())) } else { diff --git a/exporter/exporterhelper/internal/timeout_sender.go b/exporter/exporterhelper/internal/timeout_sender.go new file mode 100644 index 00000000000..5abae1b6746 --- /dev/null +++ 
b/exporter/exporterhelper/internal/timeout_sender.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/collector/exporter/internal" +) + +// TimeoutConfig for timeout. The timeout applies to individual attempts to send data to the backend. +type TimeoutConfig struct { + // Timeout is the timeout for every attempt to send data to the backend. + // A zero timeout means no timeout. + Timeout time.Duration `mapstructure:"timeout"` +} + +func (ts *TimeoutConfig) Validate() error { + // Negative timeouts are not acceptable, since all sends will fail. + if ts.Timeout < 0 { + return errors.New("'timeout' must be non-negative") + } + return nil +} + +// NewDefaultTimeoutConfig returns the default config for TimeoutConfig. +func NewDefaultTimeoutConfig() TimeoutConfig { + return TimeoutConfig{ + Timeout: 5 * time.Second, + } +} + +// TimeoutSender is a requestSender that adds a `timeout` to every request that passes this sender. +type TimeoutSender struct { + BaseRequestSender + cfg TimeoutConfig +} + +func (ts *TimeoutSender) Send(ctx context.Context, req internal.Request) error { + // TODO: Remove this by avoiding to create the timeout sender if timeout is 0. + if ts.cfg.Timeout == 0 { + return req.Export(ctx) + } + // Intentionally don't overwrite the context inside the request, because in case of retries deadline will not be + // updated because this deadline most likely is before the next one. 
+ tCtx, cancelFunc := context.WithTimeout(ctx, ts.cfg.Timeout) + defer cancelFunc() + return req.Export(tCtx) +} diff --git a/exporter/exporterhelper/timeout_sender_test.go b/exporter/exporterhelper/internal/timeout_sender_test.go similarity index 95% rename from exporter/exporterhelper/timeout_sender_test.go rename to exporter/exporterhelper/internal/timeout_sender_test.go index f72cf565360..8da16b24e18 100644 --- a/exporter/exporterhelper/timeout_sender_test.go +++ b/exporter/exporterhelper/internal/timeout_sender_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal import ( "testing" diff --git a/exporter/exporterhelper/logs.go b/exporter/exporterhelper/logs.go index 790ba188657..795bf91408e 100644 --- a/exporter/exporterhelper/logs.go +++ b/exporter/exporterhelper/logs.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/plog" @@ -64,7 +65,7 @@ func (req *logsRequest) ItemsCount() int { } type logsExporter struct { - *baseExporter + *internal.BaseExporter consumer.Logs } @@ -83,8 +84,8 @@ func NewLogsExporter( return nil, errNilPushLogsData } logsOpts := []Option{ - withMarshaler(logsRequestMarshaler), withUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeLogs, mergeSplitLogs), + internal.WithMarshaler(logsRequestMarshaler), internal.WithUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), + internal.WithBatchFuncs(mergeLogs, mergeSplitLogs), } return NewLogsRequestExporter(ctx, set, requestFromLogs(pusher), append(logsOpts, options...)...) 
} @@ -118,7 +119,7 @@ func NewLogsRequestExporter( return nil, errNilLogsConverter } - be, err := newBaseExporter(set, component.DataTypeLogs, newLogsExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, component.DataTypeLogs, newLogsExporterWithObservability, options...) if err != nil { return nil, err } @@ -131,32 +132,32 @@ func NewLogsRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeLogs, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, component.DataTypeLogs, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) return &logsExporter{ - baseExporter: be, + BaseExporter: be, Logs: lc, }, err } type logsExporterWithObservability struct { - baseRequestSender - obsrep *obsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newLogsExporterWithObservability(obsrep *obsReport) requestSender { +func newLogsExporterWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &logsExporterWithObservability{obsrep: obsrep} } -func (lewo *logsExporterWithObservability) send(ctx context.Context, req Request) error { - c := lewo.obsrep.startLogsOp(ctx) +func (lewo *logsExporterWithObservability) Send(ctx context.Context, req Request) error { + c := lewo.obsrep.StartLogsOp(ctx) numLogRecords := req.ItemsCount() - err := lewo.nextSender.send(c, req) - lewo.obsrep.endLogsOp(c, numLogRecords, err) + err := lewo.NextSender.Send(c, req) + lewo.obsrep.EndLogsOp(c, numLogRecords, err) return err } diff --git a/exporter/exporterhelper/logs_test.go b/exporter/exporterhelper/logs_test.go index 1fb952d1486..32a50b5e063 100644 --- a/exporter/exporterhelper/logs_test.go +++ b/exporter/exporterhelper/logs_test.go @@ -65,7 +65,7 @@ func TestLogsExporter_NilLogger(t *testing.T) { } func 
TestLogsRequestExporter_NilLogger(t *testing.T) { - le, err := NewLogsRequestExporter(context.Background(), exporter.Settings{}, (&fakeRequestConverter{}).requestFromLogsFunc) + le, err := NewLogsRequestExporter(context.Background(), exporter.Settings{}, (&internal.FakeRequestConverter{}).RequestFromLogsFunc) require.Nil(t, le) require.Equal(t, errNilLogger, err) } @@ -97,7 +97,7 @@ func TestLogsExporter_Default(t *testing.T) { func TestLogsRequestExporter_Default(t *testing.T) { ld := plog.NewLogs() le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromLogsFunc) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc) assert.NotNil(t, le) require.NoError(t, err) @@ -119,7 +119,7 @@ func TestLogsExporter_WithCapabilities(t *testing.T) { func TestLogsRequestExporter_WithCapabilities(t *testing.T) { capabilities := consumer.Capabilities{MutatesData: true} le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromLogsFunc, WithCapabilities(capabilities)) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc, WithCapabilities(capabilities)) require.NoError(t, err) require.NotNil(t, le) @@ -139,7 +139,7 @@ func TestLogsRequestExporter_Default_ConvertError(t *testing.T) { ld := plog.NewLogs() want := errors.New("convert_error") le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{logsError: want}).requestFromLogsFunc) + (&internal.FakeRequestConverter{LogsError: want}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) require.Equal(t, consumererror.NewPermanent(want), le.ConsumeLogs(context.Background(), ld)) @@ -149,7 +149,7 @@ func TestLogsRequestExporter_Default_ExportError(t *testing.T) { ld := plog.NewLogs() want := errors.New("export_error") le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - 
(&fakeRequestConverter{requestError: want}).requestFromLogsFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) require.Equal(t, want, le.ConsumeLogs(context.Background(), ld)) @@ -166,7 +166,7 @@ func TestLogsExporter_WithPersistentQueue(t *testing.T) { te, err := NewLogsExporter(context.Background(), set, &fakeLogsExporterConfig, ts.ConsumeLogs, WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - host := &mockHost{ext: map[component.ID]component.Component{ + host := &internal.MockHost{Ext: map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), }} require.NoError(t, te.Start(context.Background(), host)) @@ -213,7 +213,7 @@ func TestLogsRequestExporter_WithRecordMetrics(t *testing.T) { le, err := NewLogsRequestExporter(context.Background(), exporter.Settings{ID: fakeLogsExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{}).requestFromLogsFunc) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) @@ -240,7 +240,7 @@ func TestLogsRequestExporter_WithRecordMetrics_ExportError(t *testing.T) { t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) le, err := NewLogsRequestExporter(context.Background(), exporter.Settings{ID: fakeLogsExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{requestError: want}).requestFromLogsFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) @@ -292,7 +292,7 @@ func TestLogsRequestExporter_WithSpan(t *testing.T) { otel.SetTracerProvider(set.TracerProvider) defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) - le, err := NewLogsRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromLogsFunc) + le, err := 
NewLogsRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForLogsExporter(t, sr, set.TracerProvider.Tracer("test"), le, nil, 1) @@ -320,7 +320,7 @@ func TestLogsRequestExporter_WithSpan_ReturnError(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) want := errors.New("my_error") - le, err := NewLogsRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromLogsFunc) + le, err := NewLogsRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{RequestError: want}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForLogsExporter(t, sr, set.TracerProvider.Tracer("test"), le, want, 1) @@ -343,7 +343,7 @@ func TestLogsRequestExporter_WithShutdown(t *testing.T) { shutdown := func(context.Context) error { shutdownCalled = true; return nil } le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromLogsFunc, WithShutdown(shutdown)) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc, WithShutdown(shutdown)) assert.NotNil(t, le) assert.NoError(t, err) @@ -367,7 +367,7 @@ func TestLogsRequestExporter_WithShutdown_ReturnError(t *testing.T) { shutdownErr := func(context.Context) error { return want } le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromLogsFunc, WithShutdown(shutdownErr)) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc, WithShutdown(shutdownErr)) assert.NotNil(t, le) require.NoError(t, err) @@ -424,7 +424,7 @@ func checkWrapSpanForLogsExporter(t *testing.T, sr *tracetest.SpanRecorder, trac require.Equalf(t, fakeLogsParentSpanName, parentSpan.Name(), "SpanData %v", parentSpan) for _, sd := range gotSpanData[:numRequests] { require.Equalf(t, parentSpan.SpanContext(), sd.Parent(), "Exporter span 
not a child\nSpanData %v", sd) - checkStatus(t, sd, wantError) + internal.CheckStatus(t, sd, wantError) sentLogRecords := numLogRecords var failedToSendLogRecords int64 diff --git a/exporter/exporterhelper/metrics.go b/exporter/exporterhelper/metrics.go index 382c9b9ce8a..f78fc2fc972 100644 --- a/exporter/exporterhelper/metrics.go +++ b/exporter/exporterhelper/metrics.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/pmetric" @@ -64,7 +65,7 @@ func (req *metricsRequest) ItemsCount() int { } type metricsExporter struct { - *baseExporter + *internal.BaseExporter consumer.Metrics } @@ -83,8 +84,8 @@ func NewMetricsExporter( return nil, errNilPushMetricsData } metricsOpts := []Option{ - withMarshaler(metricsRequestMarshaler), withUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeMetrics, mergeSplitMetrics), + internal.WithMarshaler(metricsRequestMarshaler), internal.WithUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), + internal.WithBatchFuncs(mergeMetrics, mergeSplitMetrics), } return NewMetricsRequestExporter(ctx, set, requestFromMetrics(pusher), append(metricsOpts, options...)...) } @@ -118,7 +119,7 @@ func NewMetricsRequestExporter( return nil, errNilMetricsConverter } - be, err := newBaseExporter(set, component.DataTypeMetrics, newMetricsSenderWithObservability, options...) + be, err := internal.NewBaseExporter(set, component.DataTypeMetrics, newMetricsSenderWithObservability, options...) 
if err != nil { return nil, err } @@ -131,32 +132,32 @@ func NewMetricsRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeMetrics, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, component.DataTypeMetrics, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) return &metricsExporter{ - baseExporter: be, + BaseExporter: be, Metrics: mc, }, err } type metricsSenderWithObservability struct { - baseRequestSender - obsrep *obsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newMetricsSenderWithObservability(obsrep *obsReport) requestSender { +func newMetricsSenderWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &metricsSenderWithObservability{obsrep: obsrep} } -func (mewo *metricsSenderWithObservability) send(ctx context.Context, req Request) error { - c := mewo.obsrep.startMetricsOp(ctx) +func (mewo *metricsSenderWithObservability) Send(ctx context.Context, req Request) error { + c := mewo.obsrep.StartMetricsOp(ctx) numMetricDataPoints := req.ItemsCount() - err := mewo.nextSender.send(c, req) - mewo.obsrep.endMetricsOp(c, numMetricDataPoints, err) + err := mewo.NextSender.Send(c, req) + mewo.obsrep.EndMetricsOp(c, numMetricDataPoints, err) return err } diff --git a/exporter/exporterhelper/metrics_test.go b/exporter/exporterhelper/metrics_test.go index 02381396547..ef3bbc4f6e2 100644 --- a/exporter/exporterhelper/metrics_test.go +++ b/exporter/exporterhelper/metrics_test.go @@ -66,7 +66,7 @@ func TestMetricsExporter_NilLogger(t *testing.T) { func TestMetricsRequestExporter_NilLogger(t *testing.T) { me, err := NewMetricsRequestExporter(context.Background(), exporter.Settings{}, - (&fakeRequestConverter{}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc) 
require.Nil(t, me) require.Equal(t, errNilLogger, err) } @@ -98,7 +98,7 @@ func TestMetricsExporter_Default(t *testing.T) { func TestMetricsRequestExporter_Default(t *testing.T) { md := pmetric.NewMetrics() me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc) require.NoError(t, err) assert.NotNil(t, me) @@ -120,7 +120,7 @@ func TestMetricsExporter_WithCapabilities(t *testing.T) { func TestMetricsRequestExporter_WithCapabilities(t *testing.T) { capabilities := consumer.Capabilities{MutatesData: true} me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromMetricsFunc, WithCapabilities(capabilities)) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc, WithCapabilities(capabilities)) require.NoError(t, err) assert.NotNil(t, me) @@ -140,7 +140,7 @@ func TestMetricsRequestExporter_Default_ConvertError(t *testing.T) { md := pmetric.NewMetrics() want := errors.New("convert_error") me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{metricsError: want}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{MetricsError: want}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) require.Equal(t, consumererror.NewPermanent(want), me.ConsumeMetrics(context.Background(), md)) @@ -150,7 +150,7 @@ func TestMetricsRequestExporter_Default_ExportError(t *testing.T) { md := pmetric.NewMetrics() want := errors.New("export_error") me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{requestError: want}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) require.Equal(t, want, me.ConsumeMetrics(context.Background(), 
md)) @@ -167,7 +167,7 @@ func TestMetricsExporter_WithPersistentQueue(t *testing.T) { te, err := NewMetricsExporter(context.Background(), set, &fakeTracesExporterConfig, ms.ConsumeMetrics, WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - host := &mockHost{ext: map[component.ID]component.Component{ + host := &internal.MockHost{Ext: map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), }} require.NoError(t, te.Start(context.Background(), host)) @@ -214,7 +214,7 @@ func TestMetricsRequestExporter_WithRecordMetrics(t *testing.T) { me, err := NewMetricsRequestExporter(context.Background(), exporter.Settings{ID: fakeMetricsExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) @@ -242,7 +242,7 @@ func TestMetricsRequestExporter_WithRecordMetrics_ExportError(t *testing.T) { me, err := NewMetricsRequestExporter(context.Background(), exporter.Settings{ID: fakeMetricsExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{requestError: want}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) @@ -294,7 +294,7 @@ func TestMetricsRequestExporter_WithSpan(t *testing.T) { otel.SetTracerProvider(set.TracerProvider) defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) - me, err := NewMetricsRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromMetricsFunc) + me, err := NewMetricsRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) checkWrapSpanForMetricsExporter(t, sr, set.TracerProvider.Tracer("test"), me, nil, 2) @@ -322,7 +322,7 @@ func 
TestMetricsRequestExporter_WithSpan_ExportError(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) want := errors.New("my_error") - me, err := NewMetricsRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromMetricsFunc) + me, err := NewMetricsRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{RequestError: want}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) checkWrapSpanForMetricsExporter(t, sr, set.TracerProvider.Tracer("test"), me, want, 2) @@ -346,7 +346,7 @@ func TestMetricsRequestExporter_WithShutdown(t *testing.T) { shutdown := func(context.Context) error { shutdownCalled = true; return nil } me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromMetricsFunc, WithShutdown(shutdown)) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc, WithShutdown(shutdown)) assert.NotNil(t, me) assert.NoError(t, err) @@ -372,7 +372,7 @@ func TestMetricsRequestExporter_WithShutdown_ReturnError(t *testing.T) { shutdownErr := func(context.Context) error { return want } me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromMetricsFunc, WithShutdown(shutdownErr)) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc, WithShutdown(shutdownErr)) assert.NotNil(t, me) assert.NoError(t, err) @@ -430,7 +430,7 @@ func checkWrapSpanForMetricsExporter(t *testing.T, sr *tracetest.SpanRecorder, t require.Equalf(t, fakeMetricsParentSpanName, parentSpan.Name(), "SpanData %v", parentSpan) for _, sd := range gotSpanData[:numRequests] { require.Equalf(t, parentSpan.SpanContext(), sd.Parent(), "Exporter span not a child\nSpanData %v", sd) - checkStatus(t, sd, wantError) + internal.CheckStatus(t, sd, wantError) sentMetricPoints := numMetricPoints var failedToSendMetricPoints int64 diff --git 
a/exporter/exporterhelper/obsreport_test.go b/exporter/exporterhelper/obsreport_test.go index f8ab9aed1c8..80134bc8a62 100644 --- a/exporter/exporterhelper/obsreport_test.go +++ b/exporter/exporterhelper/obsreport_test.go @@ -12,28 +12,31 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) +var exporterID = component.MustNewID("fakeExporter") + func TestExportEnqueueFailure(t *testing.T) { tt, err := componenttest.SetupTelemetry(exporterID) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := internal.NewExporter(internal.ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) logRecords := int64(7) - obsrep.recordEnqueueFailure(context.Background(), component.DataTypeLogs, logRecords) + obsrep.RecordEnqueueFailure(context.Background(), component.DataTypeLogs, logRecords) require.NoError(t, tt.CheckExporterEnqueueFailedLogs(logRecords)) spans := int64(12) - obsrep.recordEnqueueFailure(context.Background(), component.DataTypeTraces, spans) + obsrep.RecordEnqueueFailure(context.Background(), component.DataTypeTraces, spans) require.NoError(t, tt.CheckExporterEnqueueFailedTraces(spans)) metricPoints := int64(21) - obsrep.recordEnqueueFailure(context.Background(), component.DataTypeMetrics, metricPoints) + obsrep.RecordEnqueueFailure(context.Background(), component.DataTypeMetrics, metricPoints) require.NoError(t, 
tt.CheckExporterEnqueueFailedMetrics(metricPoints)) } diff --git a/exporter/exporterhelper/queue_sender.go b/exporter/exporterhelper/queue_sender.go index 58edbcc8732..b81e2036fab 100644 --- a/exporter/exporterhelper/queue_sender.go +++ b/exporter/exporterhelper/queue_sender.go @@ -3,143 +3,20 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/multierr" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal" - "go.opentelemetry.io/collector/exporter/exporterqueue" - "go.opentelemetry.io/collector/exporter/internal/queue" -) - -const defaultQueueSize = 1000 +import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" // Deprecated: [v0.110.0] Use QueueConfig instead. -type QueueSettings = QueueConfig +type QueueSettings = internal.QueueConfig // QueueConfig defines configuration for queueing batches before sending to the consumerSender. -type QueueConfig struct { - // Enabled indicates whether to not enqueue batches before sending to the consumerSender. - Enabled bool `mapstructure:"enabled"` - // NumConsumers is the number of consumers from the queue. Defaults to 10. - // If batching is enabled, a combined batch cannot contain more requests than the number of consumers. - // So it's recommended to set higher number of consumers if batching is enabled. - NumConsumers int `mapstructure:"num_consumers"` - // QueueSize is the maximum number of batches allowed in queue at a given time. 
- QueueSize int `mapstructure:"queue_size"` - // StorageID if not empty, enables the persistent storage and uses the component specified - // as a storage extension for the persistent queue - StorageID *component.ID `mapstructure:"storage"` -} +type QueueConfig = internal.QueueConfig // Deprecated: [v0.110.0] Use NewDefaultQueueConfig instead. func NewDefaultQueueSettings() QueueSettings { - return NewDefaultQueueConfig() + return internal.NewDefaultQueueConfig() } // NewDefaultQueueConfig returns the default config for QueueConfig. func NewDefaultQueueConfig() QueueConfig { - return QueueConfig{ - Enabled: true, - NumConsumers: 10, - // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue - // This can be estimated at 1-4 GB worth of maximum memory usage - // This default is probably still too high, and may be adjusted further down in a future release - QueueSize: defaultQueueSize, - } -} - -// Validate checks if the QueueConfig configuration is valid -func (qCfg *QueueConfig) Validate() error { - if !qCfg.Enabled { - return nil - } - - if qCfg.QueueSize <= 0 { - return errors.New("queue size must be positive") - } - - if qCfg.NumConsumers <= 0 { - return errors.New("number of queue consumers must be positive") - } - - return nil -} - -type queueSender struct { - baseRequestSender - queue exporterqueue.Queue[Request] - numConsumers int - traceAttribute attribute.KeyValue - consumers *queue.Consumers[Request] - - obsrep *obsReport - exporterID component.ID -} - -func newQueueSender(q exporterqueue.Queue[Request], set exporter.Settings, numConsumers int, - exportFailureMessage string, obsrep *obsReport) *queueSender { - qs := &queueSender{ - queue: q, - numConsumers: numConsumers, - traceAttribute: attribute.String(internal.ExporterKey, set.ID.String()), - obsrep: obsrep, - exporterID: set.ID, - } - consumeFunc := func(ctx context.Context, req Request) error { - err := qs.nextSender.send(ctx, req) - if err != nil { - 
set.Logger.Error("Exporting failed. Dropping data."+exportFailureMessage, - zap.Error(err), zap.Int("dropped_items", req.ItemsCount())) - } - return err - } - qs.consumers = queue.NewQueueConsumers[Request](q, numConsumers, consumeFunc) - return qs -} - -// Start is invoked during service startup. -func (qs *queueSender) Start(ctx context.Context, host component.Host) error { - if err := qs.consumers.Start(ctx, host); err != nil { - return err - } - - dataTypeAttr := attribute.String(internal.DataTypeKey, qs.obsrep.dataType.String()) - return multierr.Append( - qs.obsrep.telemetryBuilder.InitExporterQueueSize(func() int64 { return int64(qs.queue.Size()) }, - metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute, dataTypeAttr))), - qs.obsrep.telemetryBuilder.InitExporterQueueCapacity(func() int64 { return int64(qs.queue.Capacity()) }, - metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute))), - ) -} - -// Shutdown is invoked during service shutdown. -func (qs *queueSender) Shutdown(ctx context.Context) error { - // Stop the queue and consumers, this will drain the queue and will call the retry (which is stopped) that will only - // try once every request. - return qs.consumers.Shutdown(ctx) -} - -// send implements the requestSender interface. It puts the request in the queue. -func (qs *queueSender) send(ctx context.Context, req Request) error { - // Prevent cancellation and deadline to propagate to the context stored in the queue. - // The grpc/http based receivers will cancel the request context after this function returns. 
- c := context.WithoutCancel(ctx) - - span := trace.SpanFromContext(c) - if err := qs.queue.Offer(c, req); err != nil { - span.AddEvent("Failed to enqueue item.", trace.WithAttributes(qs.traceAttribute)) - return err - } - - span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) - return nil + return internal.NewDefaultQueueConfig() } diff --git a/exporter/exporterhelper/retry_sender.go b/exporter/exporterhelper/retry_sender.go index 0caa10ad72f..5b4476bb1f6 100644 --- a/exporter/exporterhelper/retry_sender.go +++ b/exporter/exporterhelper/retry_sender.go @@ -4,139 +4,12 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "errors" - "fmt" "time" - "github.com/cenkalti/backoff/v4" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper/internal" - "go.opentelemetry.io/collector/exporter/internal/experr" ) -// TODO: Clean this by forcing all exporters to return an internal error type that always include the information about retries. -type throttleRetry struct { - err error - delay time.Duration -} - -func (t throttleRetry) Error() string { - return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() -} - -func (t throttleRetry) Unwrap() error { - return t.err -} - // NewThrottleRetry creates a new throttle retry error. 
func NewThrottleRetry(err error, delay time.Duration) error { - return throttleRetry{ - err: err, - delay: delay, - } -} - -type retrySender struct { - baseRequestSender - traceAttribute attribute.KeyValue - cfg configretry.BackOffConfig - stopCh chan struct{} - logger *zap.Logger -} - -func newRetrySender(config configretry.BackOffConfig, set exporter.Settings) *retrySender { - return &retrySender{ - traceAttribute: attribute.String(internal.ExporterKey, set.ID.String()), - cfg: config, - stopCh: make(chan struct{}), - logger: set.Logger, - } -} - -func (rs *retrySender) Shutdown(context.Context) error { - close(rs.stopCh) - return nil -} - -// send implements the requestSender interface -func (rs *retrySender) send(ctx context.Context, req Request) error { - // Do not use NewExponentialBackOff since it calls Reset and the code here must - // call Reset after changing the InitialInterval (this saves an unnecessary call to Now). - expBackoff := backoff.ExponentialBackOff{ - InitialInterval: rs.cfg.InitialInterval, - RandomizationFactor: rs.cfg.RandomizationFactor, - Multiplier: rs.cfg.Multiplier, - MaxInterval: rs.cfg.MaxInterval, - MaxElapsedTime: rs.cfg.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, - } - expBackoff.Reset() - span := trace.SpanFromContext(ctx) - retryNum := int64(0) - for { - span.AddEvent( - "Sending request.", - trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) - - err := rs.nextSender.send(ctx, req) - if err == nil { - return nil - } - - // Immediately drop data on permanent errors. 
- if consumererror.IsPermanent(err) { - return fmt.Errorf("not retryable error: %w", err) - } - - req = extractPartialRequest(req, err) - - backoffDelay := expBackoff.NextBackOff() - if backoffDelay == backoff.Stop { - return fmt.Errorf("no more retries left: %w", err) - } - - throttleErr := throttleRetry{} - if errors.As(err, &throttleErr) { - backoffDelay = max(backoffDelay, throttleErr.delay) - } - - backoffDelayStr := backoffDelay.String() - span.AddEvent( - "Exporting failed. Will retry the request after interval.", - trace.WithAttributes( - rs.traceAttribute, - attribute.String("interval", backoffDelayStr), - attribute.String("error", err.Error()))) - rs.logger.Info( - "Exporting failed. Will retry the request after interval.", - zap.Error(err), - zap.String("interval", backoffDelayStr), - ) - retryNum++ - - // back-off, but get interrupted when shutting down or request is cancelled or timed out. - select { - case <-ctx.Done(): - return fmt.Errorf("request is cancelled or timed out %w", err) - case <-rs.stopCh: - return experr.NewShutdownErr(err) - case <-time.After(backoffDelay): - } - } -} - -// max returns the larger of x or y. -func max(x, y time.Duration) time.Duration { - if x < y { - return y - } - return x + return internal.NewThrottleRetry(err, delay) } diff --git a/exporter/exporterhelper/timeout_sender.go b/exporter/exporterhelper/timeout_sender.go index 9e489f54ded..9788397b7d2 100644 --- a/exporter/exporterhelper/timeout_sender.go +++ b/exporter/exporterhelper/timeout_sender.go @@ -4,55 +4,20 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "errors" - "time" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) // Deprecated: [v0.110.0] Use TimeoutConfig instead. type TimeoutSettings = TimeoutConfig -// TimeoutConfig for timeout. The timeout applies to individual attempts to send data to the backend. 
-type TimeoutConfig struct { - // Timeout is the timeout for every attempt to send data to the backend. - // A zero timeout means no timeout. - Timeout time.Duration `mapstructure:"timeout"` -} - -func (ts *TimeoutConfig) Validate() error { - // Negative timeouts are not acceptable, since all sends will fail. - if ts.Timeout < 0 { - return errors.New("'timeout' must be non-negative") - } - return nil -} +type TimeoutConfig = internal.TimeoutConfig // Deprecated: [v0.110.0] Use NewDefaultTimeoutConfig instead. func NewDefaultTimeoutSettings() TimeoutSettings { - return NewDefaultTimeoutConfig() + return internal.NewDefaultTimeoutConfig() } // NewDefaultTimeoutConfig returns the default config for TimeoutConfig. func NewDefaultTimeoutConfig() TimeoutConfig { - return TimeoutConfig{ - Timeout: 5 * time.Second, - } -} - -// timeoutSender is a requestSender that adds a `timeout` to every request that passes this sender. -type timeoutSender struct { - baseRequestSender - cfg TimeoutConfig -} - -func (ts *timeoutSender) send(ctx context.Context, req Request) error { - // TODO: Remove this by avoiding to create the timeout sender if timeout is 0. - if ts.cfg.Timeout == 0 { - return req.Export(ctx) - } - // Intentionally don't overwrite the context inside the request, because in case of retries deadline will not be - // updated because this deadline most likely is before the next one. 
- tCtx, cancelFunc := context.WithTimeout(ctx, ts.cfg.Timeout) - defer cancelFunc() - return req.Export(tCtx) + return internal.NewDefaultTimeoutConfig() } diff --git a/exporter/exporterhelper/traces.go b/exporter/exporterhelper/traces.go index 075db219d6a..da057a861bf 100644 --- a/exporter/exporterhelper/traces.go +++ b/exporter/exporterhelper/traces.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/ptrace" @@ -64,7 +65,7 @@ func (req *tracesRequest) ItemsCount() int { } type traceExporter struct { - *baseExporter + *internal.BaseExporter consumer.Traces } @@ -83,8 +84,8 @@ func NewTracesExporter( return nil, errNilPushTraceData } tracesOpts := []Option{ - withMarshaler(tracesRequestMarshaler), withUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeTraces, mergeSplitTraces), + internal.WithMarshaler(tracesRequestMarshaler), internal.WithUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), + internal.WithBatchFuncs(mergeTraces, mergeSplitTraces), } return NewTracesRequestExporter(ctx, set, requestFromTraces(pusher), append(tracesOpts, options...)...) } @@ -118,7 +119,7 @@ func NewTracesRequestExporter( return nil, errNilTracesConverter } - be, err := newBaseExporter(set, component.DataTypeTraces, newTracesExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, component.DataTypeTraces, newTracesExporterWithObservability, options...) 
if err != nil { return nil, err } @@ -131,33 +132,33 @@ func NewTracesRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeTraces, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, component.DataTypeTraces, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) return &traceExporter{ - baseExporter: be, + BaseExporter: be, Traces: tc, }, err } type tracesExporterWithObservability struct { - baseRequestSender - obsrep *obsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newTracesExporterWithObservability(obsrep *obsReport) requestSender { +func newTracesExporterWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &tracesExporterWithObservability{obsrep: obsrep} } -func (tewo *tracesExporterWithObservability) send(ctx context.Context, req Request) error { - c := tewo.obsrep.startTracesOp(ctx) +func (tewo *tracesExporterWithObservability) Send(ctx context.Context, req Request) error { + c := tewo.obsrep.StartTracesOp(ctx) numTraceSpans := req.ItemsCount() // Forward the data to the next consumer (this pusher is the next). 
- err := tewo.nextSender.send(c, req) - tewo.obsrep.endTracesOp(c, numTraceSpans, err) + err := tewo.NextSender.Send(c, req) + tewo.obsrep.EndTracesOp(c, numTraceSpans, err) return err } diff --git a/exporter/exporterhelper/traces_test.go b/exporter/exporterhelper/traces_test.go index 2d3d1cfdf8b..d6feaedc1e4 100644 --- a/exporter/exporterhelper/traces_test.go +++ b/exporter/exporterhelper/traces_test.go @@ -61,7 +61,7 @@ func TestTracesExporter_NilLogger(t *testing.T) { } func TestTracesRequestExporter_NilLogger(t *testing.T) { - te, err := NewTracesRequestExporter(context.Background(), exporter.Settings{}, (&fakeRequestConverter{}).requestFromTracesFunc) + te, err := NewTracesRequestExporter(context.Background(), exporter.Settings{}, (&internal.FakeRequestConverter{}).RequestFromTracesFunc) require.Nil(t, te) require.Equal(t, errNilLogger, err) } @@ -93,7 +93,7 @@ func TestTracesExporter_Default(t *testing.T) { func TestTracesRequestExporter_Default(t *testing.T) { td := ptrace.NewTraces() te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromTracesFunc) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc) assert.NotNil(t, te) require.NoError(t, err) @@ -115,7 +115,7 @@ func TestTracesExporter_WithCapabilities(t *testing.T) { func TestTracesRequestExporter_WithCapabilities(t *testing.T) { capabilities := consumer.Capabilities{MutatesData: true} te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromTracesFunc, WithCapabilities(capabilities)) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc, WithCapabilities(capabilities)) assert.NotNil(t, te) require.NoError(t, err) @@ -137,7 +137,7 @@ func TestTracesRequestExporter_Default_ConvertError(t *testing.T) { td := ptrace.NewTraces() want := errors.New("convert_error") te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - 
(&fakeRequestConverter{tracesError: want}).requestFromTracesFunc) + (&internal.FakeRequestConverter{TracesError: want}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) require.Equal(t, consumererror.NewPermanent(want), te.ConsumeTraces(context.Background(), td)) @@ -147,7 +147,7 @@ func TestTracesRequestExporter_Default_ExportError(t *testing.T) { td := ptrace.NewTraces() want := errors.New("export_error") te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{requestError: want}).requestFromTracesFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) require.Equal(t, want, te.ConsumeTraces(context.Background(), td)) @@ -164,7 +164,7 @@ func TestTracesExporter_WithPersistentQueue(t *testing.T) { te, err := NewTracesExporter(context.Background(), set, &fakeTracesExporterConfig, ts.ConsumeTraces, WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - host := &mockHost{ext: map[component.ID]component.Component{ + host := &internal.MockHost{Ext: map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), }} require.NoError(t, te.Start(context.Background(), host)) @@ -211,7 +211,7 @@ func TestTracesRequestExporter_WithRecordMetrics(t *testing.T) { te, err := NewTracesRequestExporter(context.Background(), exporter.Settings{ID: fakeTracesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{}).requestFromTracesFunc) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) @@ -239,7 +239,7 @@ func TestTracesRequestExporter_WithRecordMetrics_RequestSenderError(t *testing.T te, err := NewTracesRequestExporter(context.Background(), exporter.Settings{ID: fakeTracesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - 
(&fakeRequestConverter{requestError: want}).requestFromTracesFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) @@ -292,7 +292,7 @@ func TestTracesRequestExporter_WithSpan(t *testing.T) { otel.SetTracerProvider(set.TracerProvider) defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) - te, err := NewTracesRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromTracesFunc) + te, err := NewTracesRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) @@ -322,7 +322,7 @@ func TestTracesRequestExporter_WithSpan_ExportError(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) want := errors.New("export_error") - te, err := NewTracesRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromTracesFunc) + te, err := NewTracesRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{RequestError: want}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) @@ -347,7 +347,7 @@ func TestTracesRequestExporter_WithShutdown(t *testing.T) { shutdown := func(context.Context) error { shutdownCalled = true; return nil } te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromTracesFunc, WithShutdown(shutdown)) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc, WithShutdown(shutdown)) assert.NotNil(t, te) assert.NoError(t, err) @@ -373,7 +373,7 @@ func TestTracesRequestExporter_WithShutdown_ReturnError(t *testing.T) { shutdownErr := func(context.Context) error { return want } te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromTracesFunc, WithShutdown(shutdownErr)) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc, 
WithShutdown(shutdownErr)) assert.NotNil(t, te) assert.NoError(t, err) @@ -433,7 +433,7 @@ func checkWrapSpanForTracesExporter(t *testing.T, sr *tracetest.SpanRecorder, tr for _, sd := range gotSpanData[:numRequests] { require.Equalf(t, parentSpan.SpanContext(), sd.Parent(), "Exporter span not a child\nSpanData %v", sd) - checkStatus(t, sd, wantError) + internal.CheckStatus(t, sd, wantError) sentSpans := numSpans var failedToSendSpans int64