From 1f551af086d2443cbd8ed846d85e9abb8046f14e Mon Sep 17 00:00:00 2001 From: Rajkumar Rangaraj Date: Wed, 27 Nov 2024 17:05:19 -0800 Subject: [PATCH 01/10] Replace logs with new impl --- .../ExportClient/OtlpGrpcLogExportClient.cs | 45 -- .../ExportClient/OtlpHttpLogExportClient.cs | 69 --- .../OtlpLogRecordTransformer.cs | 281 ---------- .../OtlpExporterOptionsExtensions.cs | 43 -- .../OtlpLogExporter.cs | 79 ++- .../OtlpLogExporterHelperExtensions.cs | 14 +- .../ProtobufOtlpLogExporter.cs | 127 ----- .../OtlpExporterOptionsExtensionsTests.cs | 11 +- .../OtlpLogExporterTests.cs | 526 ++++-------------- 9 files changed, 179 insertions(+), 1016 deletions(-) delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcLogExportClient.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpLogExportClient.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/OtlpLogRecordTransformer.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/ProtobufOtlpLogExporter.cs diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcLogExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcLogExportClient.cs deleted file mode 100644 index dc9d31feeb3..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcLogExportClient.cs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using Grpc.Core; -using OtlpCollector = OpenTelemetry.Proto.Collector.Logs.V1; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -/// Class for sending OTLP Logs export request over gRPC. -internal sealed class OtlpGrpcLogExportClient : BaseOtlpGrpcExportClient -{ - private readonly OtlpCollector.LogsService.LogsServiceClient logsClient; - - public OtlpGrpcLogExportClient(OtlpExporterOptions options, OtlpCollector.LogsService.LogsServiceClient? logsServiceClient = null) - : base(options) - { - if (logsServiceClient != null) - { - this.logsClient = logsServiceClient; - } - else - { - this.Channel = options.CreateChannel(); - this.logsClient = new OtlpCollector.LogsService.LogsServiceClient(this.Channel); - } - } - - /// - public override ExportClientResponse SendExportRequest(OtlpCollector.ExportLogsServiceRequest request, DateTime deadlineUtc, CancellationToken cancellationToken = default) - { - try - { - this.logsClient.Export(request, headers: this.Headers, deadline: deadlineUtc, cancellationToken: cancellationToken); - - // We do not need to return back response and deadline for successful response so using cached value. 
- return SuccessExportResponse; - } - catch (RpcException ex) - { - OpenTelemetryProtocolExporterEventSource.Log.FailedToReachCollector(this.Endpoint, ex); - - return new ExportClientGrpcResponse(success: false, deadlineUtc, ex, null, null); - } - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpLogExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpLogExportClient.cs deleted file mode 100644 index 6347ece8f3b..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpLogExportClient.cs +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using System.Net; -#if NETFRAMEWORK -using System.Net.Http; -#endif -using System.Net.Http.Headers; -using System.Runtime.CompilerServices; -using Google.Protobuf; -using OtlpCollector = OpenTelemetry.Proto.Collector.Logs.V1; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -/// Class for sending OTLP log export request over HTTP. -internal sealed class OtlpHttpLogExportClient : BaseOtlpHttpExportClient -{ - internal const string MediaContentType = "application/x-protobuf"; - private const string LogsExportPath = "v1/logs"; - - public OtlpHttpLogExportClient(OtlpExporterOptions options, HttpClient httpClient) - : base(options, httpClient, LogsExportPath) - { - } - - protected override HttpContent CreateHttpContent(OtlpCollector.ExportLogsServiceRequest exportRequest) - { - return new ExportRequestContent(exportRequest); - } - - internal sealed class ExportRequestContent : HttpContent - { - private static readonly MediaTypeHeaderValue ProtobufMediaTypeHeader = new(MediaContentType); - - private readonly OtlpCollector.ExportLogsServiceRequest exportRequest; - - public ExportRequestContent(OtlpCollector.ExportLogsServiceRequest exportRequest) - { - this.exportRequest = exportRequest; - this.Headers.ContentType = ProtobufMediaTypeHeader; - } - -#if NET - protected override void SerializeToStream(Stream stream, TransportContext? context, CancellationToken cancellationToken) - { - this.SerializeToStreamInternal(stream); - } -#endif - - protected override Task SerializeToStreamAsync(Stream stream, TransportContext? context) - { - this.SerializeToStreamInternal(stream); - return Task.CompletedTask; - } - - protected override bool TryComputeLength(out long length) - { - // We can't know the length of the content being pushed to the output stream. 
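// Note: returning false here means no Content-Length can be computed up front; for HTTP/1.1
// HttpClient will typically fall back to chunked transfer encoding for this request body.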
- length = -1; - return false; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private void SerializeToStreamInternal(Stream stream) - { - this.exportRequest.WriteTo(stream); - } - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/OtlpLogRecordTransformer.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/OtlpLogRecordTransformer.cs deleted file mode 100644 index e6942286e22..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/OtlpLogRecordTransformer.cs +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using System.Collections.Concurrent; -using System.Runtime.CompilerServices; -using Google.Protobuf; -using OpenTelemetry.Internal; -using OpenTelemetry.Logs; -using OpenTelemetry.Trace; -using OtlpCollector = OpenTelemetry.Proto.Collector.Logs.V1; -using OtlpCommon = OpenTelemetry.Proto.Common.V1; -using OtlpLogs = OpenTelemetry.Proto.Logs.V1; -using OtlpResource = OpenTelemetry.Proto.Resource.V1; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; - -internal sealed class OtlpLogRecordTransformer -{ - internal static readonly ConcurrentBag LogListPool = new(); - - private readonly SdkLimitOptions sdkLimitOptions; - private readonly ExperimentalOptions experimentalOptions; - - public OtlpLogRecordTransformer(SdkLimitOptions sdkLimitOptions, ExperimentalOptions experimentalOptions) - { - this.sdkLimitOptions = sdkLimitOptions; - this.experimentalOptions = experimentalOptions; - } - - internal OtlpCollector.ExportLogsServiceRequest BuildExportRequest( - OtlpResource.Resource processResource, - in Batch logRecordBatch) - { - // TODO: https://github.com/open-telemetry/opentelemetry-dotnet/issues/4943 - Dictionary logsByCategory = new Dictionary(); - - var request = new OtlpCollector.ExportLogsServiceRequest(); - - var resourceLogs = new OtlpLogs.ResourceLogs - { - Resource = processResource, - }; - request.ResourceLogs.Add(resourceLogs); - - foreach (var logRecord in logRecordBatch) - { - var otlpLogRecord = this.ToOtlpLog(logRecord); - if (otlpLogRecord != null) - { - var scopeName = logRecord.Logger.Name; - if (!logsByCategory.TryGetValue(scopeName, out var scopeLogs)) - { - scopeLogs = this.GetLogListFromPool(scopeName); - logsByCategory.Add(scopeName, scopeLogs); - resourceLogs.ScopeLogs.Add(scopeLogs); - } - - scopeLogs.LogRecords.Add(otlpLogRecord); - } - } - - return request; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void Return(OtlpCollector.ExportLogsServiceRequest request) - { - var resourceLogs = request.ResourceLogs.FirstOrDefault(); - if (resourceLogs == null) - { - return; - } - - foreach (var scope in resourceLogs.ScopeLogs) - { - scope.LogRecords.Clear(); - LogListPool.Add(scope); - } - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal OtlpLogs.ScopeLogs GetLogListFromPool(string name) - { - if (!LogListPool.TryTake(out var logs)) - { - logs = new OtlpLogs.ScopeLogs - { - Scope = new OtlpCommon.InstrumentationScope - { - Name = name, // Name is enforced to not be null, but it can be empty. - Version = string.Empty, // proto requires this to be non-null. - }, - }; - } - else - { - logs.Scope.Name = name; - logs.Scope.Version = string.Empty; - } - - return logs; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal OtlpLogs.LogRecord? ToOtlpLog(LogRecord logRecord) - { - OtlpLogs.LogRecord? 
otlpLogRecord = null; - - try - { - var timestamp = (ulong)logRecord.Timestamp.ToUnixTimeNanoseconds(); - otlpLogRecord = new OtlpLogs.LogRecord - { - TimeUnixNano = timestamp, - ObservedTimeUnixNano = timestamp, - SeverityNumber = GetSeverityNumber(logRecord.Severity), - }; - - if (!string.IsNullOrWhiteSpace(logRecord.SeverityText)) - { - otlpLogRecord.SeverityText = logRecord.SeverityText; - } - else if (logRecord.Severity.HasValue) - { - otlpLogRecord.SeverityText = logRecord.Severity.Value.ToShortName(); - } - - var attributeValueLengthLimit = this.sdkLimitOptions.LogRecordAttributeValueLengthLimit; - var attributeCountLimit = this.sdkLimitOptions.LogRecordAttributeCountLimit ?? int.MaxValue; - - if (this.experimentalOptions.EmitLogEventAttributes) - { - if (logRecord.EventId.Id != default) - { - AddIntAttribute(otlpLogRecord, ExperimentalOptions.LogRecordEventIdAttribute, logRecord.EventId.Id, attributeCountLimit); - } - - if (!string.IsNullOrEmpty(logRecord.EventId.Name)) - { - AddStringAttribute(otlpLogRecord, ExperimentalOptions.LogRecordEventNameAttribute, logRecord.EventId.Name, attributeCountLimit, attributeValueLengthLimit); - } - } - - if (logRecord.Exception != null) - { - AddStringAttribute(otlpLogRecord, SemanticConventions.AttributeExceptionType, logRecord.Exception.GetType().Name, attributeCountLimit, attributeValueLengthLimit); - AddStringAttribute(otlpLogRecord, SemanticConventions.AttributeExceptionMessage, logRecord.Exception.Message, attributeCountLimit, attributeValueLengthLimit); - AddStringAttribute(otlpLogRecord, SemanticConventions.AttributeExceptionStacktrace, logRecord.Exception.ToInvariantString(), attributeCountLimit, attributeValueLengthLimit); - } - - bool bodyPopulatedFromFormattedMessage = false; - if (logRecord.FormattedMessage != null) - { - otlpLogRecord.Body = new OtlpCommon.AnyValue { StringValue = logRecord.FormattedMessage }; - bodyPopulatedFromFormattedMessage = true; - } - - if (logRecord.Attributes != null) - { - foreach (var attribute in logRecord.Attributes) - { - // Special casing {OriginalFormat} - // See https://github.com/open-telemetry/opentelemetry-dotnet/pull/3182 - // for explanation. - if (attribute.Key.Equals("{OriginalFormat}") && !bodyPopulatedFromFormattedMessage) - { - otlpLogRecord.Body = new OtlpCommon.AnyValue { StringValue = attribute.Value as string }; - } - else - { - AddAttribute(otlpLogRecord, attribute, attributeCountLimit, attributeValueLengthLimit); - } - } - - // Supports setting Body directly on LogRecord for the Logs Bridge API. - if (otlpLogRecord.Body == null && logRecord.Body != null) - { - // If {OriginalFormat} is not present in the attributes, - // use logRecord.Body if it is set. - otlpLogRecord.Body = new OtlpCommon.AnyValue { StringValue = logRecord.Body }; - } - } - - if (logRecord.TraceId != default && logRecord.SpanId != default) - { - byte[] traceIdBytes = new byte[16]; - byte[] spanIdBytes = new byte[8]; - - logRecord.TraceId.CopyTo(traceIdBytes); - logRecord.SpanId.CopyTo(spanIdBytes); - - otlpLogRecord.TraceId = UnsafeByteOperations.UnsafeWrap(traceIdBytes); - otlpLogRecord.SpanId = UnsafeByteOperations.UnsafeWrap(spanIdBytes); - otlpLogRecord.Flags = (uint)logRecord.TraceFlags; - } - - logRecord.ForEachScope(ProcessScope, otlpLogRecord); - - void ProcessScope(LogRecordScope scope, OtlpLogs.LogRecord otlpLog) - { - foreach (var scopeItem in scope) - { - if (scopeItem.Key.Equals("{OriginalFormat}") || string.IsNullOrEmpty(scopeItem.Key)) - { - // Ignore if the scope key is empty. 
- // Ignore if the scope key is {OriginalFormat} - // Attributes should not contain duplicates, - // and it is expensive to de-dup, so this - // exporter is going to pass the scope items as is. - // {OriginalFormat} is going to be the key - // if one uses formatted string for scopes - // and if there are nested scopes, this is - // guaranteed to create duplicate keys. - // Similar for empty keys, which is what the - // key is going to be if user simply - // passes a string as scope. - // To summarize this exporter only allows - // IReadOnlyList> - // or IEnumerable>. - // and expect users to provide unique keys. - // Note: It is possible that we allow users - // to override this exporter feature. So not blocking - // empty/{OriginalFormat} in the SDK itself. - } - else - { - AddAttribute(otlpLog, scopeItem, attributeCountLimit, attributeValueLengthLimit); - } - } - } - } - catch (Exception ex) - { - OpenTelemetryProtocolExporterEventSource.Log.CouldNotTranslateLogRecord(ex.Message); - } - - return otlpLogRecord; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void AddAttribute(OtlpLogs.LogRecord logRecord, KeyValuePair attribute, int maxAttributeCount, int? maxValueLength) - { - var logRecordAttributes = logRecord.Attributes; - - if (logRecordAttributes.Count == maxAttributeCount) - { - logRecord.DroppedAttributesCount++; - } - else - { - OtlpTagWriter.Instance.TryWriteTag(ref logRecordAttributes, attribute, maxValueLength); - } - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void AddStringAttribute(OtlpLogs.LogRecord logRecord, string key, string? value, int maxAttributeCount, int? maxValueLength) - { - var attributeItem = new KeyValuePair(key, value); - - AddAttribute(logRecord, attributeItem, maxAttributeCount, maxValueLength); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void AddIntAttribute(OtlpLogs.LogRecord logRecord, string key, int value, int maxAttributeCount) - { - var attributeItem = new KeyValuePair(key, value); - - AddAttribute(logRecord, attributeItem, maxAttributeCount, maxValueLength: null); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static OtlpLogs.SeverityNumber GetSeverityNumber(LogRecordSeverity? severity) - { - if (!severity.HasValue) - { - return OtlpLogs.SeverityNumber.Unspecified; - } - - return (OtlpLogs.SeverityNumber)(int)severity.Value; - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpExporterOptionsExtensions.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpExporterOptionsExtensions.cs index 57198880c47..08728636824 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpExporterOptionsExtensions.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpExporterOptionsExtensions.cs @@ -14,7 +14,6 @@ using System.Diagnostics; using Google.Protobuf; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; -using LogOtlpCollector = OpenTelemetry.Proto.Collector.Logs.V1; using MetricsOtlpCollector = OpenTelemetry.Proto.Collector.Metrics.V1; namespace OpenTelemetry.Exporter; @@ -191,38 +190,6 @@ public static IProtobufExportClient GetProtobufExportClient(this OtlpExporterOpt } } - public static OtlpExporterTransmissionHandler GetLogsExportTransmissionHandler(this OtlpExporterOptions options, ExperimentalOptions experimentalOptions) - { - var exportClient = GetLogExportClient(options); - double timeoutMilliseconds = exportClient is OtlpHttpLogExportClient httpLogExportClient - ? 
httpLogExportClient.HttpClient.Timeout.TotalMilliseconds - : options.TimeoutMilliseconds; - - if (experimentalOptions.EnableInMemoryRetry) - { - return new OtlpExporterRetryTransmissionHandler(exportClient, timeoutMilliseconds); - } - else if (experimentalOptions.EnableDiskRetry) - { - Debug.Assert(!string.IsNullOrEmpty(experimentalOptions.DiskRetryDirectoryPath), $"{nameof(experimentalOptions.DiskRetryDirectoryPath)} is null or empty"); - - return new OtlpExporterPersistentStorageTransmissionHandler( - exportClient, - timeoutMilliseconds, - (byte[] data) => - { - var request = new LogOtlpCollector.ExportLogsServiceRequest(); - request.MergeFrom(data); - return request; - }, - Path.Combine(experimentalOptions.DiskRetryDirectoryPath, "logs")); - } - else - { - return new OtlpExporterTransmissionHandler(exportClient, timeoutMilliseconds); - } - } - public static IExportClient GetMetricsExportClient(this OtlpExporterOptions options) => options.Protocol switch { @@ -233,16 +200,6 @@ public static IProtobufExportClient GetProtobufExportClient(this OtlpExporterOpt _ => throw new NotSupportedException($"Protocol {options.Protocol} is not supported."), }; - public static IExportClient GetLogExportClient(this OtlpExporterOptions options) => - options.Protocol switch - { - OtlpExportProtocol.Grpc => new OtlpGrpcLogExportClient(options), - OtlpExportProtocol.HttpProtobuf => new OtlpHttpLogExportClient( - options, - options.HttpClientFactory?.Invoke() ?? throw new InvalidOperationException("OtlpExporterOptions was missing HttpClientFactory or it returned null.")), - _ => throw new NotSupportedException($"Protocol {options.Protocol} is not supported."), - }; - public static void TryEnableIHttpClientFactoryIntegration(this OtlpExporterOptions options, IServiceProvider serviceProvider, string httpClientName) { if (serviceProvider != null diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporter.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporter.cs index dec5e8cc3e9..7ef66c9bd52 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporter.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporter.cs @@ -1,12 +1,13 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +using System.Buffers.Binary; using System.Diagnostics; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; +using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; using OpenTelemetry.Logs; -using OtlpCollector = OpenTelemetry.Proto.Collector.Logs.V1; -using OtlpResource = OpenTelemetry.Proto.Resource.V1; +using OpenTelemetry.Resources; namespace OpenTelemetry.Exporter; @@ -16,10 +17,17 @@ namespace OpenTelemetry.Exporter; /// public sealed class OtlpLogExporter : BaseExporter { - private readonly OtlpExporterTransmissionHandler transmissionHandler; - private readonly OtlpLogRecordTransformer otlpLogRecordTransformer; + private readonly SdkLimitOptions sdkLimitOptions; + private readonly ExperimentalOptions experimentalOptions; + private readonly ProtobufOtlpExporterTransmissionHandler transmissionHandler; + private readonly int startWritePosition; - private OtlpResource.Resource? processResource; + private Resource? resource; + + // Initial buffer size set to ~732KB. 
+ // This choice allows us to gradually grow the buffer while targeting a final capacity of around 100 MB, + // by the 7th doubling to maintain efficient allocation without frequent resizing. + private byte[] buffer = new byte[750000]; /// /// Initializes a new instance of the class. @@ -41,19 +49,19 @@ internal OtlpLogExporter( OtlpExporterOptions exporterOptions, SdkLimitOptions sdkLimitOptions, ExperimentalOptions experimentalOptions, - OtlpExporterTransmissionHandler? transmissionHandler = null) + ProtobufOtlpExporterTransmissionHandler? transmissionHandler = null) { Debug.Assert(exporterOptions != null, "exporterOptions was null"); Debug.Assert(sdkLimitOptions != null, "sdkLimitOptions was null"); Debug.Assert(experimentalOptions != null, "experimentalOptions was null"); - this.transmissionHandler = transmissionHandler ?? exporterOptions!.GetLogsExportTransmissionHandler(experimentalOptions!); - - this.otlpLogRecordTransformer = new OtlpLogRecordTransformer(sdkLimitOptions!, experimentalOptions!); + this.experimentalOptions = experimentalOptions!; + this.sdkLimitOptions = sdkLimitOptions!; + this.startWritePosition = exporterOptions!.Protocol == OtlpExportProtocol.Grpc ? 5 : 0; + this.transmissionHandler = transmissionHandler ?? exporterOptions!.GetProtobufExportTransmissionHandler(experimentalOptions!, OtlpSignalType.Logs); } - internal OtlpResource.Resource ProcessResource - => this.processResource ??= this.ParentProvider.GetResource().ToOtlpResource(); + internal Resource Resource => this.resource ??= this.ParentProvider.GetResource(); /// public override ExportResult Export(in Batch logRecordBatch) @@ -61,36 +69,59 @@ public override ExportResult Export(in Batch logRecordBatch) // Prevents the exporter's gRPC and HTTP operations from being instrumented. using var scope = SuppressInstrumentationScope.Begin(); - OtlpCollector.ExportLogsServiceRequest? request = null; - try { - request = this.otlpLogRecordTransformer.BuildExportRequest(this.ProcessResource, logRecordBatch); + int writePosition = ProtobufOtlpLogSerializer.WriteLogsData(this.buffer, this.startWritePosition, this.sdkLimitOptions, this.experimentalOptions, this.Resource, logRecordBatch); + + if (this.startWritePosition == 5) + { + // Grpc payload consists of 3 parts + // byte 0 - Specifying if the payload is compressed. + // 1-4 byte - Specifies the length of payload in big endian format. + // 5 and above - Protobuf serialized data. + Span data = new Span(this.buffer, 1, 4); + var dataLength = writePosition - 5; + BinaryPrimitives.WriteUInt32BigEndian(data, (uint)dataLength); + } - if (!this.transmissionHandler.TrySubmitRequest(request)) + if (!this.transmissionHandler.TrySubmitRequest(this.buffer, writePosition)) { return ExportResult.Failure; } } + catch (IndexOutOfRangeException) + { + if (!this.IncreaseBufferSize()) + { + throw; + } + } catch (Exception ex) { OpenTelemetryProtocolExporterEventSource.Log.ExportMethodException(ex); return ExportResult.Failure; } - finally - { - if (request != null) - { - this.otlpLogRecordTransformer.Return(request); - } - } return ExportResult.Success; } /// - protected override bool OnShutdown(int timeoutMilliseconds) + protected override bool OnShutdown(int timeoutMilliseconds) => this.transmissionHandler?.Shutdown(timeoutMilliseconds) ?? true; + + // TODO: Consider moving this to a shared utility class. + private bool IncreaseBufferSize() { - return this.transmissionHandler?.Shutdown(timeoutMilliseconds) ?? 
true; + var newBufferSize = this.buffer.Length * 2; + + if (newBufferSize > 100 * 1024 * 1024) + { + return false; + } + + var newBuffer = new byte[newBufferSize]; + this.buffer.CopyTo(newBuffer, 0); + this.buffer = newBuffer; + + return true; } } diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporterHelperExtensions.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporterHelperExtensions.cs index 0929fe0cc70..42faf425356 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporterHelperExtensions.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporterHelperExtensions.cs @@ -305,16 +305,10 @@ internal static BaseProcessor BuildOtlpLogExporter( * "OtlpLogExporter"); */ - BaseExporter otlpExporter; - - if (experimentalOptions != null && experimentalOptions.UseCustomProtobufSerializer) - { - otlpExporter = new ProtobufOtlpLogExporter(exporterOptions!, sdkLimitOptions!, experimentalOptions!); - } - else - { - otlpExporter = new OtlpLogExporter(exporterOptions!, sdkLimitOptions!, experimentalOptions!); - } + BaseExporter otlpExporter = new OtlpLogExporter( + exporterOptions!, + sdkLimitOptions!, + experimentalOptions!); if (configureExporterInstance != null) { diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/ProtobufOtlpLogExporter.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/ProtobufOtlpLogExporter.cs deleted file mode 100644 index 32ad66ccff4..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/ProtobufOtlpLogExporter.cs +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using System.Buffers.Binary; -using System.Diagnostics; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; -using OpenTelemetry.Logs; -using OpenTelemetry.Resources; - -namespace OpenTelemetry.Exporter; - -/// -/// Exporter consuming and exporting the data using -/// the OpenTelemetry protocol (OTLP). -/// -internal sealed class ProtobufOtlpLogExporter : BaseExporter -{ - private readonly SdkLimitOptions sdkLimitOptions; - private readonly ExperimentalOptions experimentalOptions; - private readonly ProtobufOtlpExporterTransmissionHandler transmissionHandler; - private readonly int startWritePosition; - - private Resource? resource; - - // Initial buffer size set to ~732KB. - // This choice allows us to gradually grow the buffer while targeting a final capacity of around 100 MB, - // by the 7th doubling to maintain efficient allocation without frequent resizing. - private byte[] buffer = new byte[750000]; - - /// - /// Initializes a new instance of the class. - /// - /// Configuration options for the exporter. - public ProtobufOtlpLogExporter(OtlpExporterOptions options) - : this(options, sdkLimitOptions: new(), experimentalOptions: new(), transmissionHandler: null) - { - } - - /// - /// Initializes a new instance of the class. - /// - /// . - /// . - /// . - /// . - internal ProtobufOtlpLogExporter( - OtlpExporterOptions exporterOptions, - SdkLimitOptions sdkLimitOptions, - ExperimentalOptions experimentalOptions, - ProtobufOtlpExporterTransmissionHandler? 
transmissionHandler = null) - { - Debug.Assert(exporterOptions != null, "exporterOptions was null"); - Debug.Assert(sdkLimitOptions != null, "sdkLimitOptions was null"); - Debug.Assert(experimentalOptions != null, "experimentalOptions was null"); - - this.experimentalOptions = experimentalOptions!; - this.sdkLimitOptions = sdkLimitOptions!; - this.startWritePosition = exporterOptions!.Protocol == OtlpExportProtocol.Grpc ? 5 : 0; - this.transmissionHandler = transmissionHandler ?? exporterOptions!.GetProtobufExportTransmissionHandler(experimentalOptions!, OtlpSignalType.Logs); - } - - internal Resource Resource => this.resource ??= this.ParentProvider.GetResource(); - - /// - public override ExportResult Export(in Batch logRecordBatch) - { - // Prevents the exporter's gRPC and HTTP operations from being instrumented. - using var scope = SuppressInstrumentationScope.Begin(); - - try - { - int writePosition = ProtobufOtlpLogSerializer.WriteLogsData(this.buffer, this.startWritePosition, this.sdkLimitOptions, this.experimentalOptions, this.Resource, logRecordBatch); - - if (this.startWritePosition == 5) - { - // Grpc payload consists of 3 parts - // byte 0 - Specifying if the payload is compressed. - // 1-4 byte - Specifies the length of payload in big endian format. - // 5 and above - Protobuf serialized data. - Span data = new Span(this.buffer, 1, 4); - var dataLength = writePosition - 5; - BinaryPrimitives.WriteUInt32BigEndian(data, (uint)dataLength); - } - - if (!this.transmissionHandler.TrySubmitRequest(this.buffer, writePosition)) - { - return ExportResult.Failure; - } - } - catch (IndexOutOfRangeException) - { - if (!this.IncreaseBufferSize()) - { - throw; - } - } - catch (Exception ex) - { - OpenTelemetryProtocolExporterEventSource.Log.ExportMethodException(ex); - return ExportResult.Failure; - } - - return ExportResult.Success; - } - - /// - protected override bool OnShutdown(int timeoutMilliseconds) => this.transmissionHandler?.Shutdown(timeoutMilliseconds) ?? true; - - // TODO: Consider moving this to a shared utility class. 
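// One possible shape for the shared helper this TODO suggests. This is a hypothetical
// sketch only (the helper name and signature are illustrative, not part of this change);
// it mirrors the doubling-with-cap logic that both this removed exporter and the new
// OtlpLogExporter implement privately: double the buffer per attempt and stop once the
// next size would exceed the 100 MB ceiling.
internal static class SerializationBufferHelper
{
    private const int MaxBufferSizeInBytes = 100 * 1024 * 1024;

    public static bool TryIncreaseBufferSize(ref byte[] buffer)
    {
        var newBufferSize = buffer.Length * 2;

        if (newBufferSize > MaxBufferSizeInBytes)
        {
            return false;
        }

        var newBuffer = new byte[newBufferSize];
        buffer.CopyTo(newBuffer, 0);
        buffer = newBuffer;

        return true;
    }
}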
- private bool IncreaseBufferSize() - { - var newBufferSize = this.buffer.Length * 2; - - if (newBufferSize > 100 * 1024 * 1024) - { - return false; - } - - var newBuffer = new byte[newBufferSize]; - this.buffer.CopyTo(newBuffer, 0); - this.buffer = newBuffer; - - return true; - } -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpExporterOptionsExtensionsTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpExporterOptionsExtensionsTests.cs index 4e139cd99ec..d7c66ab8054 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpExporterOptionsExtensionsTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpExporterOptionsExtensionsTests.cs @@ -137,27 +137,18 @@ public void AppendPathIfNotPresent_TracesPath_AppendsCorrectly(string inputUri, [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcMetricsExportClient), false, 10000, null)] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), false, 10000, null)] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), true, 8000, null)] - [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcLogExportClient), false, 10000, null)] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpLogExportClient), false, 10000, null)] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpLogExportClient), true, 8000, null)] [InlineData(OtlpExportProtocol.Grpc, typeof(ProtobufOtlpGrpcExportClient), false, 10000, "in_memory")] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), false, 10000, "in_memory")] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), true, 8000, "in_memory")] [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcMetricsExportClient), false, 10000, "in_memory")] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), false, 10000, "in_memory")] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), true, 8000, "in_memory")] - [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcLogExportClient), false, 10000, "in_memory")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpLogExportClient), false, 10000, "in_memory")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpLogExportClient), true, 8000, "in_memory")] [InlineData(OtlpExportProtocol.Grpc, typeof(ProtobufOtlpGrpcExportClient), false, 10000, "disk")] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), false, 10000, "disk")] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), true, 8000, "disk")] [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcMetricsExportClient), false, 10000, "disk")] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), false, 10000, "disk")] [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), true, 8000, "disk")] - [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcLogExportClient), false, 10000, "disk")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpLogExportClient), false, 10000, "disk")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpLogExportClient), true, 8000, "disk")] public void GetTransmissionHandler_InitializesCorrectHandlerExportClientAndTimeoutValue(OtlpExportProtocol protocol, Type exportClientType, bool customHttpClient, int expectedTimeoutMilliseconds, string? 
retryStrategy) { var exporterOptions = new OtlpExporterOptions() { Protocol = protocol }; @@ -187,7 +178,7 @@ public void GetTransmissionHandler_InitializesCorrectHandlerExportClientAndTimeo } else { - var transmissionHandler = exporterOptions.GetLogsExportTransmissionHandler(new ExperimentalOptions(configuration)); + var transmissionHandler = exporterOptions.GetProtobufExportTransmissionHandler(new ExperimentalOptions(configuration), OtlpSignalType.Logs); AssertTransmissionHandler(transmissionHandler, exportClientType, expectedTimeoutMilliseconds, retryStrategy); } diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpLogExporterTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpLogExporterTests.cs index 032e88252e2..626cf68a30c 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpLogExporterTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpLogExporterTests.cs @@ -244,10 +244,8 @@ public void AddOtlpLogExporterParseStateValueCanBeTurnedOffHosting(bool parseSta #pragma warning restore CS0618 // Type or member is obsolete } - [Theory] - [InlineData(true)] - [InlineData(false)] - public void OtlpLogRecordTestWhenStateValuesArePopulated(bool useCustomSerializer) + [Fact] + public void OtlpLogRecordTestWhenStateValuesArePopulated() { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -266,19 +264,8 @@ public void OtlpLogRecordTestWhenStateValuesArePopulated(bool useCustomSerialize Assert.Single(logRecords); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - var logRecord = logRecords[0]; - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Equal("Hello from tomato 2.99.", otlpLogRecord.Body.StringValue); @@ -299,13 +286,10 @@ public void OtlpLogRecordTestWhenStateValuesArePopulated(bool useCustomSerialize } [Theory] - [InlineData("true", true)] - [InlineData("false", true)] - [InlineData(null, true)] - [InlineData("true", false)] - [InlineData("false", false)] - [InlineData(null, false)] - public void CheckToOtlpLogRecordEventId(string? emitLogEventAttributes, bool useCustomSerializer) + [InlineData("true")] + [InlineData("false")] + [InlineData(null)] + public void CheckToOtlpLogRecordEventId(string? emitLogEventAttributes) { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -327,20 +311,8 @@ public void CheckToOtlpLogRecordEventId(string? emitLogEventAttributes, bool use .AddInMemoryCollection(new Dictionary { [ExperimentalOptions.EmitLogEventEnvVar] = emitLogEventAttributes }) .Build(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new(configuration)); - var logRecord = logRecords[0]; - - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new(configuration), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? 
otlpLogRecord = otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new(configuration), logRecord); Assert.NotNull(otlpLogRecord); Assert.Equal("Hello from tomato 2.99.", otlpLogRecord.Body.StringValue); @@ -364,14 +336,7 @@ public void CheckToOtlpLogRecordEventId(string? emitLogEventAttributes, bool use logRecord = logRecords[0]; - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new(configuration), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new(configuration), logRecord); Assert.NotNull(otlpLogRecord); Assert.Equal("Hello from tomato 2.99.", otlpLogRecord.Body.StringValue); @@ -392,10 +357,8 @@ public void CheckToOtlpLogRecordEventId(string? emitLogEventAttributes, bool use } } - [Theory] - [InlineData(true)] - [InlineData(false)] - public void CheckToOtlpLogRecordTimestamps(bool useCustomSerializer) + [Fact] + public void CheckToOtlpLogRecordTimestamps() { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -405,29 +368,17 @@ public void CheckToOtlpLogRecordTimestamps(bool useCustomSerializer) var logger = loggerFactory.CreateLogger("OtlpLogExporterTests"); logger.LogInformation("Log message"); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); var logRecord = logRecords[0]; - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.True(otlpLogRecord.TimeUnixNano > 0); Assert.True(otlpLogRecord.ObservedTimeUnixNano > 0); } - [Theory] - [InlineData(true)] - [InlineData(false)] - public void CheckToOtlpLogRecordTraceIdSpanIdFlagWithNoActivity(bool useCustomSerializer) + [Fact] + public void CheckToOtlpLogRecordTraceIdSpanIdFlagWithNoActivity() { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -438,19 +389,8 @@ public void CheckToOtlpLogRecordTraceIdSpanIdFlagWithNoActivity(bool useCustomSe var logger = loggerFactory.CreateLogger("OtlpLogExporterTests"); logger.LogInformation("Log when there is no activity."); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - var logRecord = logRecords[0]; - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? 
otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.Null(Activity.Current); Assert.NotNull(otlpLogRecord); @@ -459,10 +399,8 @@ public void CheckToOtlpLogRecordTraceIdSpanIdFlagWithNoActivity(bool useCustomSe Assert.Equal(0u, otlpLogRecord.Flags); } - [Theory] - [InlineData(true)] - [InlineData(false)] - public void CheckToOtlpLogRecordSpanIdTraceIdAndFlag(bool useCustomSerializer) + [Fact] + public void CheckToOtlpLogRecordSpanIdTraceIdAndFlag() { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -482,20 +420,9 @@ public void CheckToOtlpLogRecordSpanIdTraceIdAndFlag(bool useCustomSerializer) expectedSpanId = activity.SpanId; } - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - var logRecord = logRecords[0]; - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Equal(expectedTraceId.ToString(), ActivityTraceId.CreateFromBytes(otlpLogRecord.TraceId.ToByteArray()).ToString()); @@ -504,19 +431,13 @@ public void CheckToOtlpLogRecordSpanIdTraceIdAndFlag(bool useCustomSerializer) } [Theory] - [InlineData(LogLevel.Trace, true)] - [InlineData(LogLevel.Debug, true)] - [InlineData(LogLevel.Information, true)] - [InlineData(LogLevel.Warning, true)] - [InlineData(LogLevel.Error, true)] - [InlineData(LogLevel.Critical, true)] - [InlineData(LogLevel.Trace, false)] - [InlineData(LogLevel.Debug, false)] - [InlineData(LogLevel.Information, false)] - [InlineData(LogLevel.Warning, false)] - [InlineData(LogLevel.Error, false)] - [InlineData(LogLevel.Critical, false)] - public void CheckToOtlpLogRecordSeverityLevelAndText(LogLevel logLevel, bool useCustomSerializer) + [InlineData(LogLevel.Trace)] + [InlineData(LogLevel.Debug)] + [InlineData(LogLevel.Information)] + [InlineData(LogLevel.Warning)] + [InlineData(LogLevel.Error)] + [InlineData(LogLevel.Critical)] + public void CheckToOtlpLogRecordSeverityLevelAndText(LogLevel logLevel) { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -532,19 +453,8 @@ public void CheckToOtlpLogRecordSeverityLevelAndText(LogLevel logLevel, bool use logger.Log(logLevel, "Hello from {name} {price}.", "tomato", 2.99); Assert.Single(logRecords); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - var logRecord = logRecords[0]; - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? 
otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); #pragma warning disable CS0618 // Type or member is obsolete @@ -576,11 +486,9 @@ public void CheckToOtlpLogRecordSeverityLevelAndText(LogLevel logLevel, bool use } [Theory] - [InlineData(true, true)] - [InlineData(false, true)] - [InlineData(true, false)] - [InlineData(false, false)] - public void CheckToOtlpLogRecordBodyIsPopulated(bool includeFormattedMessage, bool useCustomSerializer) + [InlineData(true)] + [InlineData(false)] + public void CheckToOtlpLogRecordBodyIsPopulated(bool includeFormattedMessage) { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -600,19 +508,8 @@ public void CheckToOtlpLogRecordBodyIsPopulated(bool includeFormattedMessage, bo logger.LogInformation("OpenTelemetry {Greeting} {Subject}!", "Hello", "World"); Assert.Single(logRecords); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - var logRecord = logRecords[0]; - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); if (includeFormattedMessage) @@ -631,15 +528,7 @@ public void CheckToOtlpLogRecordBodyIsPopulated(bool includeFormattedMessage, bo Assert.Single(logRecords); logRecord = logRecords[0]; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); @@ -655,15 +544,7 @@ public void CheckToOtlpLogRecordBodyIsPopulated(bool includeFormattedMessage, bo Assert.Single(logRecords); logRecord = logRecords[0]; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); @@ -699,9 +580,7 @@ public void LogRecordBodyIsExportedWhenUsingBridgeApi(bool isBodySet) Assert.Equal(2, logRecords.Count); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecords[0]); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecords[0]); Assert.NotNull(otlpLogRecord); if (isBodySet) @@ -713,7 +592,7 @@ public void LogRecordBodyIsExportedWhenUsingBridgeApi(bool isBodySet) Assert.Null(otlpLogRecord.Body); } - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecords[1]); + otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecords[1]); Assert.NotNull(otlpLogRecord); Assert.Equal(2, otlpLogRecord.Attributes.Count); @@ -730,10 +609,8 @@ public void LogRecordBodyIsExportedWhenUsingBridgeApi(bool isBodySet) Assert.Equal("Hello from {name} {price}.", otlpLogRecord.Body.StringValue); } - [Theory] - [InlineData(true)] - [InlineData(false)] - public void CheckToOtlpLogRecordExceptionAttributes(bool 
useCustomSerializer) + [Fact] + public void CheckToOtlpLogRecordExceptionAttributes() { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -747,18 +624,7 @@ public void CheckToOtlpLogRecordExceptionAttributes(bool useCustomSerializer) var logRecord = logRecords[0]; var loggedException = logRecord.Exception; - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); var otlpLogRecordAttributes = otlpLogRecord.Attributes.ToString(); @@ -774,10 +640,8 @@ public void CheckToOtlpLogRecordExceptionAttributes(bool useCustomSerializer) Assert.Contains(logRecord.Exception.ToInvariantString(), otlpLogRecordAttributes); } - [Theory] - [InlineData(true)] - [InlineData(false)] - public void CheckToOtlpLogRecordRespectsAttributeLimits(bool useCustomSerializer) + [Fact] + public void CheckToOtlpLogRecordRespectsAttributeLimits() { var sdkLimitOptions = new SdkLimitOptions { @@ -796,19 +660,8 @@ public void CheckToOtlpLogRecordRespectsAttributeLimits(bool useCustomSerializer var logger = loggerFactory.CreateLogger(string.Empty); logger.LogInformation("OpenTelemetry {AttributeOne} {AttributeTwo} {AttributeThree}!", "I'm an attribute", "I too am an attribute", "I get dropped :("); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(sdkLimitOptions, new()); - var logRecord = logRecords[0]; - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(sdkLimitOptions, new(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? otlpLogRecord = ToOtlpLogs(sdkLimitOptions, new(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Equal(1u, otlpLogRecord.DroppedAttributesCount); @@ -832,9 +685,9 @@ public void CheckToOtlpLogRecordRespectsAttributeLimits(bool useCustomSerializer public void Export_WhenExportClientIsProvidedInCtor_UsesProvidedExportClient() { // Arrange. - var testExportClient = new TestExportClient(); + var testExportClient = new TestProtobufExportClient(); var exporterOptions = new OtlpExporterOptions(); - var transmissionHandler = new OtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); + var transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); var emptyLogRecords = Array.Empty(); var emptyBatch = new Batch(emptyLogRecords, emptyLogRecords.Length); var sut = new OtlpLogExporter( @@ -854,9 +707,9 @@ public void Export_WhenExportClientIsProvidedInCtor_UsesProvidedExportClient() public void Export_WhenExportClientThrowsException_ReturnsExportResultFailure() { // Arrange. 
- var testExportClient = new TestExportClient(throwException: true); + var testExportClient = new TestProtobufExportClient(throwException: true); var exporterOptions = new OtlpExporterOptions(); - var transmissionHandler = new OtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); + var transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); var emptyLogRecords = Array.Empty(); var emptyBatch = new Batch(emptyLogRecords, emptyLogRecords.Length); var sut = new OtlpLogExporter( @@ -876,9 +729,9 @@ public void Export_WhenExportClientThrowsException_ReturnsExportResultFailure() public void Export_WhenExportIsSuccessful_ReturnsExportResultSuccess() { // Arrange. - var testExportClient = new TestExportClient(); + var testExportClient = new TestProtobufExportClient(); var exporterOptions = new OtlpExporterOptions(); - var transmissionHandler = new OtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); + var transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); var emptyLogRecords = Array.Empty(); var emptyBatch = new Batch(emptyLogRecords, emptyLogRecords.Length); var sut = new OtlpLogExporter( @@ -894,10 +747,8 @@ public void Export_WhenExportIsSuccessful_ReturnsExportResultSuccess() Assert.Equal(ExportResult.Success, result); } - [Theory] - [InlineData(true)] - [InlineData(false)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsFalse_DoesNotContainScopeAttribute(bool useCustomSerializer) + [Fact] + public void ToOtlpLog_WhenOptionsIncludeScopesIsFalse_DoesNotContainScopeAttribute() { // Arrange. var logRecords = new List(1); @@ -923,17 +774,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsFalse_DoesNotContainScopeAttribu // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - OtlpLogs.LogRecord? otlpLogRecord; - - if (useCustomSerializer) - { - otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); - } - else - { - otlpLogRecord = otlpLogRecordTransformer.ToOtlpLog(logRecord); - } + OtlpLogs.LogRecord? otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); var actualScope = TryGetAttribute(otlpLogRecord, expectedScopeKey); @@ -941,11 +782,9 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsFalse_DoesNotContainScopeAttribu } [Theory] - [InlineData("Some scope value", false)] - [InlineData('a', false)] - [InlineData("Some scope value", true)] - [InlineData('a', true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeStringValue(object scopeValue, bool useCustomSerializer) + [InlineData("Some scope value")] + [InlineData('a')] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeStringValue(object scopeValue) { // Arrange. var logRecords = new List(1); @@ -970,11 +809,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeStrin // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? 
ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Single(otlpLogRecord.Attributes); @@ -986,11 +821,9 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeStrin } [Theory] - [InlineData(true, false)] - [InlineData(false, false)] - [InlineData(true, true)] - [InlineData(false, true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeBoolValue(bool scopeValue, bool useCustomSerializer) + [InlineData(true)] + [InlineData(false)] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeBoolValue(bool scopeValue) { // Arrange. var logRecords = new List(1); @@ -1015,11 +848,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeBoolV // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Single(otlpLogRecord.Attributes); @@ -1031,35 +860,21 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeBoolV } [Theory] - [InlineData(byte.MinValue, false)] - [InlineData(byte.MaxValue, false)] - [InlineData(sbyte.MinValue, false)] - [InlineData(sbyte.MaxValue, false)] - [InlineData(short.MinValue, false)] - [InlineData(short.MaxValue, false)] - [InlineData(ushort.MinValue, false)] - [InlineData(ushort.MaxValue, false)] - [InlineData(int.MinValue, false)] - [InlineData(int.MaxValue, false)] - [InlineData(uint.MinValue, false)] - [InlineData(uint.MaxValue, false)] - [InlineData(long.MinValue, false)] - [InlineData(long.MaxValue, false)] - [InlineData(byte.MinValue, true)] - [InlineData(byte.MaxValue, true)] - [InlineData(sbyte.MinValue, true)] - [InlineData(sbyte.MaxValue, true)] - [InlineData(short.MinValue, true)] - [InlineData(short.MaxValue, true)] - [InlineData(ushort.MinValue, true)] - [InlineData(ushort.MaxValue, true)] - [InlineData(int.MinValue, true)] - [InlineData(int.MaxValue, true)] - [InlineData(uint.MinValue, true)] - [InlineData(uint.MaxValue, true)] - [InlineData(long.MinValue, true)] - [InlineData(long.MaxValue, true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeIntValue(object scopeValue, bool useCustomSerializer) + [InlineData(byte.MinValue)] + [InlineData(byte.MaxValue)] + [InlineData(sbyte.MinValue)] + [InlineData(sbyte.MaxValue)] + [InlineData(short.MinValue)] + [InlineData(short.MaxValue)] + [InlineData(ushort.MinValue)] + [InlineData(ushort.MaxValue)] + [InlineData(int.MinValue)] + [InlineData(int.MaxValue)] + [InlineData(uint.MinValue)] + [InlineData(uint.MaxValue)] + [InlineData(long.MinValue)] + [InlineData(long.MaxValue)] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeIntValue(object scopeValue) { // Arrange. var logRecords = new List(1); @@ -1084,11 +899,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeIntVa // Assert. 
var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Single(otlpLogRecord.Attributes); @@ -1100,11 +911,9 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeIntVa } [Theory] - [InlineData(float.MinValue, false)] - [InlineData(float.MaxValue, false)] - [InlineData(float.MinValue, true)] - [InlineData(float.MaxValue, true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeDoubleValueForFloat(float scopeValue, bool useCustomSerializer) + [InlineData(float.MinValue)] + [InlineData(float.MaxValue)] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeDoubleValueForFloat(float scopeValue) { // Arrange. var logRecords = new List(1); @@ -1129,11 +938,8 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeDoubl // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - var otlpLogRecord = useCustomSerializer - ? ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Single(otlpLogRecord.Attributes); @@ -1145,11 +951,9 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeDoubl } [Theory] - [InlineData(double.MinValue, false)] - [InlineData(double.MaxValue, false)] - [InlineData(double.MinValue, true)] - [InlineData(double.MaxValue, true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeDoubleValueForDouble(double scopeValue, bool useCustomSerializer) + [InlineData(double.MinValue)] + [InlineData(double.MaxValue)] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeDoubleValueForDouble(double scopeValue) { // Arrange. var logRecords = new List(1); @@ -1174,11 +978,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeDoubl // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Single(otlpLogRecord.Attributes); @@ -1188,10 +988,8 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_ContainsScopeAttributeDoubl Assert.Equal(scopeValue.ToString(), actualScope.Value.DoubleValue.ToString()); } - [Theory] - [InlineData(false)] - [InlineData(true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfTypeString_ScopeIsIgnored(bool useCustomSerializer) + [Fact] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfTypeString_ScopeIsIgnored() { // Arrange. var logRecords = new List(1); @@ -1213,28 +1011,19 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfTypeString // Assert. 
var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Empty(otlpLogRecord.Attributes); } [Theory] - [InlineData(typeof(int), false)] - [InlineData(typeof(float), false)] - [InlineData(typeof(decimal), false)] - [InlineData(typeof(char), false)] - [InlineData(typeof(bool), false)] - [InlineData(typeof(int), true)] - [InlineData(typeof(float), true)] - [InlineData(typeof(decimal), true)] - [InlineData(typeof(char), true)] - [InlineData(typeof(bool), true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfPrimitiveTypes_ScopeIsIgnored(Type typeOfScopeState, bool useCustomSerializer) + [InlineData(typeof(int))] + [InlineData(typeof(float))] + [InlineData(typeof(decimal))] + [InlineData(typeof(char))] + [InlineData(typeof(bool))] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfPrimitiveTypes_ScopeIsIgnored(Type typeOfScopeState) { // Arrange. var logRecords = new List(1); @@ -1257,20 +1046,14 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfPrimitiveT // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Empty(otlpLogRecord.Attributes); } - [Theory] - [InlineData(false)] - [InlineData(true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfDictionaryType_ScopeIsProcessed(bool useCustomSerializer) + [Fact] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfDictionaryType_ScopeIsProcessed() { // Arrange. var logRecords = new List(1); @@ -1294,11 +1077,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfDictionary // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? 
ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Single(otlpLogRecord.Attributes); @@ -1309,13 +1088,10 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfDictionary } [Theory] - [InlineData(typeof(List>), false)] - [InlineData(typeof(ReadOnlyCollection>), false)] - [InlineData(typeof(HashSet>), false)] - [InlineData(typeof(List>), true)] - [InlineData(typeof(ReadOnlyCollection>), true)] - [InlineData(typeof(HashSet>), true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfEnumerableType_ScopeIsProcessed(Type typeOfScopeState, bool useCustomSerializer) + [InlineData(typeof(List>))] + [InlineData(typeof(ReadOnlyCollection>))] + [InlineData(typeof(HashSet>))] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfEnumerableType_ScopeIsProcessed(Type typeOfScopeState) { // Arrange. var logRecords = new List(1); @@ -1341,11 +1117,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfEnumerable // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); Assert.Single(otlpLogRecord.Attributes); @@ -1356,11 +1128,9 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeStateIsOfEnumerable } [Theory] - [InlineData("Same scope key", "Same scope key", false)] - [InlineData("Scope key 1", "Scope key 2", false)] - [InlineData("Same scope key", "Same scope key", true)] - [InlineData("Scope key 1", "Scope key 2", true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndMultipleScopesAreAdded_ContainsAllAddedScopeValues(string scopeKey1, string scopeKey2, bool useCustomSerializer) + [InlineData("Same scope key", "Same scope key")] + [InlineData("Scope key 1", "Scope key 2")] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndMultipleScopesAreAdded_ContainsAllAddedScopeValues(string scopeKey1, string scopeKey2) { // Arrange. var logRecords = new List(1); @@ -1387,11 +1157,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndMultipleScopesAreAdded_C // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? 
ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); var allScopeValues = otlpLogRecord.Attributes @@ -1404,11 +1170,9 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndMultipleScopesAreAdded_C } [Theory] - [InlineData("Same scope key", "Same scope key", false)] - [InlineData("Scope key 1", "Scope key 2", false)] - [InlineData("Same scope key", "Same scope key", true)] - [InlineData("Scope key 1", "Scope key 2", true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndMultipleScopeLevelsAreAdded_ContainsAllAddedScopeValues(string scopeKey1, string scopeKey2, bool useCustomSerializer) + [InlineData("Same scope key", "Same scope key")] + [InlineData("Scope key 1", "Scope key 2")] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndMultipleScopeLevelsAreAdded_ContainsAllAddedScopeValues(string scopeKey1, string scopeKey2) { // Arrange. var logRecords = new List(1); @@ -1434,11 +1198,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndMultipleScopeLevelsAreAd // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); var allScopeValues = otlpLogRecord.Attributes @@ -1451,11 +1211,9 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndMultipleScopeLevelsAreAd } [Theory] - [InlineData("Same scope key", "Same scope key", false)] - [InlineData("Scope key 1", "Scope key 2", false)] - [InlineData("Same scope key", "Same scope key", true)] - [InlineData("Scope key 1", "Scope key 2", true)] - public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeIsUsedInLogMethod_ContainsAllAddedScopeValues(string scopeKey1, string scopeKey2, bool useCustomSerializer) + [InlineData("Same scope key", "Same scope key")] + [InlineData("Scope key 1", "Scope key 2")] + public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeIsUsedInLogMethod_ContainsAllAddedScopeValues(string scopeKey1, string scopeKey2) { // Arrange. var logRecords = new List(1); @@ -1486,11 +1244,7 @@ public void ToOtlpLog_WhenOptionsIncludeScopesIsTrue_AndScopeIsUsedInLogMethod_C // Assert. var logRecord = logRecords.Single(); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - - var otlpLogRecord = useCustomSerializer - ? 
ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord) - : otlpLogRecordTransformer.ToOtlpLog(logRecord); + var otlpLogRecord = ToOtlpLogs(DefaultSdkLimitOptions, new ExperimentalOptions(), logRecord); Assert.NotNull(otlpLogRecord); var allScopeValues = otlpLogRecord.Attributes @@ -1571,10 +1325,8 @@ public void AddOtlpLogExporterLogRecordProcessorOptionsTest(ExportProcessorType } } - [Theory] - [InlineData(true)] - [InlineData(false)] - public void ValidateInstrumentationScope(bool useCustomSerializer) + [Fact] + public void ValidateInstrumentationScope() { var logRecords = new List(); using var loggerFactory = LoggerFactory.Create(builder => @@ -1591,20 +1343,10 @@ public void ValidateInstrumentationScope(bool useCustomSerializer) Assert.Equal(2, logRecords.Count); var batch = new Batch(logRecords.ToArray(), logRecords.Count); - var logRecordTransformer = new OtlpLogRecordTransformer(new(), new()); - var resourceBuilder = ResourceBuilder.CreateEmpty(); var processResource = resourceBuilder.Build().ToOtlpResource(); - OtlpCollector.ExportLogsServiceRequest request; - if (useCustomSerializer) - { - request = CreateLogsExportRequest(DefaultSdkLimitOptions, new ExperimentalOptions(), batch, resourceBuilder.Build()); - } - else - { - request = logRecordTransformer.BuildExportRequest(processResource, batch); - } + OtlpCollector.ExportLogsServiceRequest request = CreateLogsExportRequest(DefaultSdkLimitOptions, new ExperimentalOptions(), batch, resourceBuilder.Build()); Assert.Single(request.ResourceLogs); @@ -1624,22 +1366,7 @@ public void ValidateInstrumentationScope(bool useCustomSerializer) Assert.Equal("Hello from green-tomato", logrecord2.Body.StringValue); - // Validate LogListPool - Assert.Empty(OtlpLogRecordTransformer.LogListPool); - logRecordTransformer.Return(request); - Assert.Equal(2, OtlpLogRecordTransformer.LogListPool.Count); - - if (useCustomSerializer) - { - request = CreateLogsExportRequest(DefaultSdkLimitOptions, new ExperimentalOptions(), batch, resourceBuilder.Build()); - } - else - { - request = logRecordTransformer.BuildExportRequest(processResource, batch); - - // ScopeLogs will be reused. - Assert.Empty(OtlpLogRecordTransformer.LogListPool); - } + request = CreateLogsExportRequest(DefaultSdkLimitOptions, new ExperimentalOptions(), batch, resourceBuilder.Build()); Assert.Single(request.ResourceLogs); } @@ -1699,11 +1426,9 @@ public void VerifyEnvironmentVariablesTakenFromIConfigurationWhenUsingLoggingBui } [Theory] - [InlineData("my_instrumentation_scope_name", "my_instrumentation_scope_name", true)] - [InlineData(null, "", true)] - [InlineData("my_instrumentation_scope_name", "my_instrumentation_scope_name", false)] - [InlineData(null, "", false)] - public void LogRecordLoggerNameIsExportedWhenUsingBridgeApi(string? loggerName, string expectedScopeName, bool useCustomSerializer) + [InlineData("my_instrumentation_scope_name", "my_instrumentation_scope_name")] + [InlineData(null, "")] + public void LogRecordLoggerNameIsExportedWhenUsingBridgeApi(string? loggerName, string expectedScopeName) { LogRecordAttributeList attributes = default; attributes.Add("name", "tomato"); @@ -1723,21 +1448,8 @@ public void LogRecordLoggerNameIsExportedWhenUsingBridgeApi(string? 
loggerName, Assert.Single(logRecords); - var otlpLogRecordTransformer = new OtlpLogRecordTransformer(DefaultSdkLimitOptions, new()); - var batch = new Batch(new[] { logRecords[0] }, 1); - - OtlpCollector.ExportLogsServiceRequest request; - if (useCustomSerializer) - { - request = CreateLogsExportRequest(DefaultSdkLimitOptions, new ExperimentalOptions(), batch, ResourceBuilder.CreateEmpty().Build()); - } - else - { - request = otlpLogRecordTransformer.BuildExportRequest( - new Proto.Resource.V1.Resource(), - batch); - } + OtlpCollector.ExportLogsServiceRequest request = CreateLogsExportRequest(DefaultSdkLimitOptions, new ExperimentalOptions(), batch, ResourceBuilder.CreateEmpty().Build()); Assert.NotNull(request); Assert.Single(request.ResourceLogs); From 9aa77bc7ad630db5922e57b5a0afeaf276107c5b Mon Sep 17 00:00:00 2001 From: Rajkumar Rangaraj Date: Thu, 28 Nov 2024 13:03:46 -0800 Subject: [PATCH 02/10] Remove package reference, remove proto files, replace logs and metrics --- .../ExportClient/BaseOtlpGrpcExportClient.cs | 67 -- .../ExportClient/BaseOtlpHttpExportClient.cs | 110 --- .../ExportClient/IExportClient.cs | 8 +- .../ExportClient/IProtobufExportClient.cs | 30 - ...tlpExportClient.cs => OtlpExportClient.cs} | 19 +- ...xportClient.cs => OtlpGrpcExportClient.cs} | 4 +- .../OtlpGrpcMetricsExportClient.cs | 45 -- ...xportClient.cs => OtlpHttpExportClient.cs} | 4 +- .../OtlpHttpMetricsExportClient.cs | 69 -- .../Implementation/ExportClient/OtlpRetry.cs | 57 +- .../Implementation/MetricItemExtensions.cs | 442 ----------- .../Implementation/OtlpTagWriter.cs | 105 --- .../Implementation/ResourceExtensions.cs | 36 - ...terPersistentStorageTransmissionHandler.cs | 54 +- .../OtlpExporterRetryTransmissionHandler.cs | 8 +- .../OtlpExporterTransmissionHandler.cs | 32 +- ...terPersistentStorageTransmissionHandler.cs | 149 ---- ...bufOtlpExporterRetryTransmissionHandler.cs | 35 - ...ProtobufOtlpExporterTransmissionHandler.cs | 136 ---- .../google/rpc/error_details.proto | 37 - .../Implementation/google/rpc/status.proto | 42 -- .../opentelemetry/proto/collector/README.md | 10 - .../collector/logs/v1/logs_service.proto | 79 -- .../collector/logs/v1/logs_service_http.yaml | 9 - .../metrics/v1/metrics_service.proto | 79 -- .../metrics/v1/metrics_service_http.yaml | 9 - .../v1experimental/profiles_service.proto | 78 -- .../v1experimental/profiles_service_http.yaml | 9 - .../collector/trace/v1/trace_service.proto | 79 -- .../trace/v1/trace_service_http.yaml | 9 - .../proto/common/v1/common.proto | 81 -- .../opentelemetry/proto/logs/v1/logs.proto | 211 ------ .../proto/metrics/v1/metrics.proto | 691 ------------------ .../v1experimental/pprofextended.proto | 388 ---------- .../profiles/v1experimental/profiles.proto | 191 ----- .../proto/resource/v1/resource.proto | 37 - .../opentelemetry/proto/trace/v1/trace.proto | 355 --------- ...etry.Exporter.OpenTelemetryProtocol.csproj | 13 - .../OtlpExporterOptionsExtensions.cs | 111 +-- .../OtlpLogExporter.cs | 6 +- .../OtlpMetricExporter.cs | 70 +- .../OtlpMetricExporterExtensions.cs | 11 +- .../OtlpTraceExporter.cs | 6 +- .../ProtobufOtlpMetricExporter.cs | 120 --- 44 files changed, 124 insertions(+), 4017 deletions(-) delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/BaseOtlpGrpcExportClient.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/BaseOtlpHttpExportClient.cs delete mode 100644 
src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/IProtobufExportClient.cs rename src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/{ProtobufOtlpExportClient.cs => OtlpExportClient.cs} (84%) rename src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/{ProtobufOtlpGrpcExportClient.cs => OtlpGrpcExportClient.cs} (97%) delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcMetricsExportClient.cs rename src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/{ProtobufOtlpHttpExportClient.cs => OtlpHttpExportClient.cs} (90%) delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpMetricsExportClient.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/MetricItemExtensions.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/OtlpTagWriter.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ResourceExtensions.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterPersistentStorageTransmissionHandler.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterRetryTransmissionHandler.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterTransmissionHandler.cs delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/google/rpc/error_details.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/google/rpc/status.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/README.md delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/common/v1/common.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/logs/v1/logs.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/metrics/v1/metrics.proto delete mode 100644 
src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/resource/v1/resource.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/trace/v1/trace.proto delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/ProtobufOtlpMetricExporter.cs diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/BaseOtlpGrpcExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/BaseOtlpGrpcExportClient.cs deleted file mode 100644 index 2eab9778852..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/BaseOtlpGrpcExportClient.cs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using Grpc.Core; -using OpenTelemetry.Internal; -#if NETSTANDARD2_1 || NET -using Grpc.Net.Client; -#endif - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -/// Base class for sending OTLP export request over gRPC. -/// Type of export request. -internal abstract class BaseOtlpGrpcExportClient : IExportClient -{ - protected static readonly ExportClientGrpcResponse SuccessExportResponse - = new( - success: true, - deadlineUtc: default, - exception: null, - status: null, - grpcStatusDetailsHeader: null); - - protected BaseOtlpGrpcExportClient(OtlpExporterOptions options) - { - Guard.ThrowIfNull(options); - Guard.ThrowIfInvalidTimeout(options.TimeoutMilliseconds); - - this.Endpoint = new UriBuilder(options.Endpoint).Uri; - this.Headers = options.GetMetadataFromHeaders(); - this.TimeoutMilliseconds = options.TimeoutMilliseconds; - } - -#if NETSTANDARD2_1 || NET - internal GrpcChannel? Channel { get; set; } -#else - internal Channel? Channel { get; set; } -#endif - - internal Uri Endpoint { get; } - - internal Metadata Headers { get; } - - internal int TimeoutMilliseconds { get; } - - /// - public abstract ExportClientResponse SendExportRequest(TRequest request, DateTime deadlineUtc, CancellationToken cancellationToken = default); - - /// - public virtual bool Shutdown(int timeoutMilliseconds) - { - if (this.Channel == null) - { - return true; - } - - if (timeoutMilliseconds == -1) - { - this.Channel.ShutdownAsync().Wait(); - return true; - } - else - { - return Task.WaitAny(new Task[] { this.Channel.ShutdownAsync(), Task.Delay(timeoutMilliseconds) }) == 0; - } - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/BaseOtlpHttpExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/BaseOtlpHttpExportClient.cs deleted file mode 100644 index 7e975fa1339..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/BaseOtlpHttpExportClient.cs +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -#if NETFRAMEWORK -using System.Net.Http; -#endif -using OpenTelemetry.Internal; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -/// Base class for sending OTLP export request over HTTP. -/// Type of export request. 
-internal abstract class BaseOtlpHttpExportClient : IExportClient -{ - private static readonly ExportClientHttpResponse SuccessExportResponse = new ExportClientHttpResponse(success: true, deadlineUtc: default, response: null, exception: null); -#if NET - private readonly bool synchronousSendSupportedByCurrentPlatform; -#endif - - protected BaseOtlpHttpExportClient(OtlpExporterOptions options, HttpClient httpClient, string signalPath) - { - Guard.ThrowIfNull(options); - Guard.ThrowIfNull(httpClient); - Guard.ThrowIfNull(signalPath); - Guard.ThrowIfInvalidTimeout(options.TimeoutMilliseconds); - - Uri exporterEndpoint = options.AppendSignalPathToEndpoint - ? options.Endpoint.AppendPathIfNotPresent(signalPath) - : options.Endpoint; - this.Endpoint = new UriBuilder(exporterEndpoint).Uri; - this.Headers = options.GetHeaders>((d, k, v) => d.Add(k, v)); - this.HttpClient = httpClient; - -#if NET - // See: https://github.com/dotnet/runtime/blob/280f2a0c60ce0378b8db49adc0eecc463d00fe5d/src/libraries/System.Net.Http/src/System/Net/Http/HttpClientHandler.AnyMobile.cs#L767 - this.synchronousSendSupportedByCurrentPlatform = !OperatingSystem.IsAndroid() - && !OperatingSystem.IsIOS() - && !OperatingSystem.IsTvOS() - && !OperatingSystem.IsBrowser(); -#endif - } - - internal HttpClient HttpClient { get; } - - internal Uri Endpoint { get; set; } - - internal IReadOnlyDictionary Headers { get; } - - /// - public ExportClientResponse SendExportRequest(TRequest request, DateTime deadlineUtc, CancellationToken cancellationToken = default) - { - try - { - using var httpRequest = this.CreateHttpRequest(request); - - using var httpResponse = this.SendHttpRequest(httpRequest, cancellationToken); - - try - { - httpResponse.EnsureSuccessStatusCode(); - } - catch (HttpRequestException ex) - { - return new ExportClientHttpResponse(success: false, deadlineUtc: deadlineUtc, response: httpResponse, ex); - } - - // We do not need to return back response and deadline for successful response so using cached value. - return SuccessExportResponse; - } - catch (HttpRequestException ex) - { - OpenTelemetryProtocolExporterEventSource.Log.FailedToReachCollector(this.Endpoint, ex); - - return new ExportClientHttpResponse(success: false, deadlineUtc: deadlineUtc, response: null, exception: ex); - } - } - - /// - public bool Shutdown(int timeoutMilliseconds) - { - this.HttpClient.CancelPendingRequests(); - return true; - } - - protected abstract HttpContent CreateHttpContent(TRequest exportRequest); - - protected HttpRequestMessage CreateHttpRequest(TRequest exportRequest) - { - var request = new HttpRequestMessage(HttpMethod.Post, this.Endpoint); - foreach (var header in this.Headers) - { - request.Headers.Add(header.Key, header.Value); - } - - request.Content = this.CreateHttpContent(exportRequest); - - return request; - } - - protected HttpResponseMessage SendHttpRequest(HttpRequestMessage request, CancellationToken cancellationToken) - { -#if NET - return this.synchronousSendSupportedByCurrentPlatform - ? 
this.HttpClient.Send(request, cancellationToken) - : this.HttpClient.SendAsync(request, cancellationToken).GetAwaiter().GetResult(); -#else - return this.HttpClient.SendAsync(request, cancellationToken).GetAwaiter().GetResult(); -#endif - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/IExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/IExportClient.cs index 26c7c5e33c0..9df60c0e41c 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/IExportClient.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/IExportClient.cs @@ -4,17 +4,17 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; /// Export client interface. -/// Type of export request. -internal interface IExportClient +internal interface IExportClient { /// /// Method for sending export request to the server. /// - /// The request to send to the server. + /// The request body to send to the server. + /// length of the content. /// The deadline time in utc for export request to finish. /// An optional token for canceling the call. /// . - ExportClientResponse SendExportRequest(TRequest request, DateTime deadlineUtc, CancellationToken cancellationToken = default); + ExportClientResponse SendExportRequest(byte[] buffer, int contentLength, DateTime deadlineUtc, CancellationToken cancellationToken = default); /// /// Method for shutting down the export client. diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/IProtobufExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/IProtobufExportClient.cs deleted file mode 100644 index 617e5122134..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/IProtobufExportClient.cs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -/// Export client interface. -internal interface IProtobufExportClient -{ - /// - /// Method for sending export request to the server. - /// - /// The request body to send to the server. - /// length of the content. - /// The deadline time in utc for export request to finish. - /// An optional token for canceling the call. - /// . - ExportClientResponse SendExportRequest(byte[] buffer, int contentLength, DateTime deadlineUtc, CancellationToken cancellationToken = default); - - /// - /// Method for shutting down the export client. - /// - /// - /// The number of milliseconds to wait, or Timeout.Infinite to - /// wait indefinitely. - /// - /// - /// Returns true if shutdown succeeded; otherwise, false. 
- /// - bool Shutdown(int timeoutMilliseconds); -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpExportClient.cs similarity index 84% rename from src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpExportClient.cs rename to src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpExportClient.cs index 519afcd548a..7347c69b271 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpExportClient.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpExportClient.cs @@ -9,14 +9,14 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; -internal abstract class ProtobufOtlpExportClient : IProtobufExportClient +internal abstract class OtlpExportClient : IExportClient { private static readonly Version Http2RequestVersion = new(2, 0); #if NET private static readonly bool SynchronousSendSupportedByCurrentPlatform; - static ProtobufOtlpExportClient() + static OtlpExportClient() { #if NET // See: https://github.com/dotnet/runtime/blob/280f2a0c60ce0378b8db49adc0eecc463d00fe5d/src/libraries/System.Net.Http/src/System/Net/Http/HttpClientHandler.AnyMobile.cs#L767 @@ -28,13 +28,24 @@ static ProtobufOtlpExportClient() } #endif - protected ProtobufOtlpExportClient(OtlpExporterOptions options, HttpClient httpClient, string signalPath) + protected OtlpExportClient(OtlpExporterOptions options, HttpClient httpClient, string signalPath) { Guard.ThrowIfNull(options); Guard.ThrowIfNull(httpClient); Guard.ThrowIfNull(signalPath); - Uri exporterEndpoint = options.Endpoint.AppendPathIfNotPresent(signalPath); + Uri exporterEndpoint; + if (options.Protocol == OtlpExportProtocol.Grpc) + { + exporterEndpoint = options.Endpoint.AppendPathIfNotPresent(signalPath); + } + else + { + exporterEndpoint = options.AppendSignalPathToEndpoint + ? options.Endpoint.AppendPathIfNotPresent(signalPath) + : options.Endpoint; + } + this.Endpoint = new UriBuilder(exporterEndpoint).Uri; this.Headers = options.GetHeaders>((d, k, v) => d.Add(k, v)); this.HttpClient = httpClient; diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpGrpcExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcExportClient.cs similarity index 97% rename from src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpGrpcExportClient.cs rename to src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcExportClient.cs index d70149e8344..1e645c2559f 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpGrpcExportClient.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcExportClient.cs @@ -10,7 +10,7 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; /// Base class for sending OTLP export request over gRPC. 
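With the transport-specific clients deleted, every signal now goes through the single byte-oriented IExportClient contract shown earlier in this patch. A rough sketch of how an exporter might call it under that contract; the serializer step and buffer handling here are assumptions, only the SendExportRequest shape comes from the interface above.

    // Sketch only: the exporter serializes into a byte buffer up front and hands the
    // buffer plus the number of written bytes to the transport-agnostic client.
    private static bool TrySend(IExportClient exportClient, byte[] buffer, int writePosition, int timeoutMilliseconds)
    {
        var deadlineUtc = DateTime.UtcNow.AddMilliseconds(timeoutMilliseconds);
        var response = exportClient.SendExportRequest(buffer, writePosition, deadlineUtc);
        return response.Success; // retry / persistent-storage transmission handlers build on this result
    }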
-internal sealed class ProtobufOtlpGrpcExportClient : ProtobufOtlpExportClient +internal sealed class OtlpGrpcExportClient : OtlpExportClient { public const string GrpcStatusDetailsHeader = "grpc-status-details-bin"; private static readonly ExportClientHttpResponse SuccessExportResponse = new(success: true, deadlineUtc: default, response: null, exception: null); @@ -24,7 +24,7 @@ private static readonly ExportClientGrpcResponse DefaultExceptionExportClientGrp status: null, grpcStatusDetailsHeader: null); - public ProtobufOtlpGrpcExportClient(OtlpExporterOptions options, HttpClient httpClient, string signalPath) + public OtlpGrpcExportClient(OtlpExporterOptions options, HttpClient httpClient, string signalPath) : base(options, httpClient, signalPath) { } diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcMetricsExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcMetricsExportClient.cs deleted file mode 100644 index b67b0789da0..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpGrpcMetricsExportClient.cs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using Grpc.Core; -using OtlpCollector = OpenTelemetry.Proto.Collector.Metrics.V1; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -/// Class for sending OTLP metrics export request over gRPC. -internal sealed class OtlpGrpcMetricsExportClient : BaseOtlpGrpcExportClient -{ - private readonly OtlpCollector.MetricsService.MetricsServiceClient metricsClient; - - public OtlpGrpcMetricsExportClient(OtlpExporterOptions options, OtlpCollector.MetricsService.MetricsServiceClient? metricsServiceClient = null) - : base(options) - { - if (metricsServiceClient != null) - { - this.metricsClient = metricsServiceClient; - } - else - { - this.Channel = options.CreateChannel(); - this.metricsClient = new OtlpCollector.MetricsService.MetricsServiceClient(this.Channel); - } - } - - /// - public override ExportClientResponse SendExportRequest(OtlpCollector.ExportMetricsServiceRequest request, DateTime deadlineUtc, CancellationToken cancellationToken = default) - { - try - { - this.metricsClient.Export(request, headers: this.Headers, deadline: deadlineUtc, cancellationToken: cancellationToken); - - // We do not need to return back response and deadline for successful response so using cached value. 
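Because OtlpGrpcExportClient now posts the serialized bytes with HttpClient over HTTP/2 instead of going through Grpc.Core, the payload must carry the standard gRPC length-prefixed message framing. The sketch below shows that framing only; reserving the first five bytes of the buffer for it is an assumption about how the serializer lays out its output.

    // gRPC message frame: 1 byte compressed flag followed by a 4 byte big-endian
    // message length, then the protobuf payload itself.
    private static void WriteGrpcFrameHeader(byte[] buffer, int messageLength)
    {
        buffer[0] = 0; // 0 = not compressed
        buffer[1] = (byte)((messageLength >> 24) & 0xFF);
        buffer[2] = (byte)((messageLength >> 16) & 0xFF);
        buffer[3] = (byte)((messageLength >> 8) & 0xFF);
        buffer[4] = (byte)(messageLength & 0xFF);
    }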
- return SuccessExportResponse; - } - catch (RpcException ex) - { - OpenTelemetryProtocolExporterEventSource.Log.FailedToReachCollector(this.Endpoint, ex); - - return new ExportClientGrpcResponse(false, deadlineUtc, ex, null, null); - } - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpHttpExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpExportClient.cs similarity index 90% rename from src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpHttpExportClient.cs rename to src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpExportClient.cs index 11bc932fa5a..0d938a90f89 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/ProtobufOtlpHttpExportClient.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpExportClient.cs @@ -9,12 +9,12 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; /// Class for sending OTLP trace export request over HTTP. -internal sealed class ProtobufOtlpHttpExportClient : ProtobufOtlpExportClient +internal sealed class OtlpHttpExportClient : OtlpExportClient { internal static readonly MediaTypeHeaderValue MediaHeaderValue = new("application/x-protobuf"); private static readonly ExportClientHttpResponse SuccessExportResponse = new(success: true, deadlineUtc: default, response: null, exception: null); - internal ProtobufOtlpHttpExportClient(OtlpExporterOptions options, HttpClient httpClient, string signalPath) + internal OtlpHttpExportClient(OtlpExporterOptions options, HttpClient httpClient, string signalPath) : base(options, httpClient, signalPath) { } diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpMetricsExportClient.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpMetricsExportClient.cs deleted file mode 100644 index 7bd2b1610d4..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpHttpMetricsExportClient.cs +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using System.Net; -#if NETFRAMEWORK -using System.Net.Http; -#endif -using System.Net.Http.Headers; -using System.Runtime.CompilerServices; -using Google.Protobuf; -using OtlpCollector = OpenTelemetry.Proto.Collector.Metrics.V1; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -/// Class for sending OTLP metrics export request over HTTP. 
-internal sealed class OtlpHttpMetricsExportClient : BaseOtlpHttpExportClient -{ - internal const string MediaContentType = "application/x-protobuf"; - private const string MetricsExportPath = "v1/metrics"; - - public OtlpHttpMetricsExportClient(OtlpExporterOptions options, HttpClient httpClient) - : base(options, httpClient, MetricsExportPath) - { - } - - protected override HttpContent CreateHttpContent(OtlpCollector.ExportMetricsServiceRequest exportRequest) - { - return new ExportRequestContent(exportRequest); - } - - internal sealed class ExportRequestContent : HttpContent - { - private static readonly MediaTypeHeaderValue ProtobufMediaTypeHeader = new(MediaContentType); - - private readonly OtlpCollector.ExportMetricsServiceRequest exportRequest; - - public ExportRequestContent(OtlpCollector.ExportMetricsServiceRequest exportRequest) - { - this.exportRequest = exportRequest; - this.Headers.ContentType = ProtobufMediaTypeHeader; - } - -#if NET - protected override void SerializeToStream(Stream stream, TransportContext? context, CancellationToken cancellationToken) - { - this.SerializeToStreamInternal(stream); - } -#endif - - protected override Task SerializeToStreamAsync(Stream stream, TransportContext? context) - { - this.SerializeToStreamInternal(stream); - return Task.CompletedTask; - } - - protected override bool TryComputeLength(out long length) - { - // We can't know the length of the content being pushed to the output stream. - length = -1; - return false; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private void SerializeToStreamInternal(Stream stream) - { - this.exportRequest.WriteTo(stream); - } - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpRetry.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpRetry.cs index 2639333bce7..89afecb4e9d 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpRetry.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ExportClient/OtlpRetry.cs @@ -1,12 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -using System.Diagnostics; using System.Net; using System.Net.Http.Headers; -using Google.Rpc; -using Grpc.Core; -using Status = Google.Rpc.Status; +using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient.Grpc; namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; @@ -85,11 +82,7 @@ public static bool TryGetGrpcRetryResult(ExportClientGrpcResponse response, int { retryResult = default; - if (response.Exception is RpcException rpcException) - { - return TryGetRetryResult(rpcException.StatusCode, IsGrpcStatusCodeRetryable, response.DeadlineUtc, rpcException.Trailers, TryGetGrpcRetryDelay, retryDelayMilliseconds, out retryResult); - } - else if (response.Status != null) + if (response.Status != null) { var nextRetryDelayMilliseconds = retryDelayMilliseconds; @@ -98,7 +91,7 @@ public static bool TryGetGrpcRetryResult(ExportClientGrpcResponse response, int return false; } - var throttleDelay = Grpc.GrpcStatusDeserializer.TryGetGrpcRetryDelay(response.GrpcStatusDetailsHeader); + var throttleDelay = GrpcStatusDeserializer.TryGetGrpcRetryDelay(response.GrpcStatusDetailsHeader); var retryable = IsGrpcStatusCodeRetryable(response.Status.Value.StatusCode, throttleDelay.HasValue); if (!retryable) @@ -203,32 +196,6 @@ private static int CalculateNextRetryDelay(int nextRetryDelayMilliseconds) return 
Convert.ToInt32(nextMilliseconds); } - private static TimeSpan? TryGetGrpcRetryDelay(StatusCode statusCode, Metadata trailers) - { - Debug.Assert(trailers != null, "trailers was null"); - - if (statusCode != StatusCode.ResourceExhausted && statusCode != StatusCode.Unavailable) - { - return null; - } - - var statusDetails = trailers!.Get(GrpcStatusDetailsHeader); - if (statusDetails != null && statusDetails.IsBinary) - { - var status = Status.Parser.ParseFrom(statusDetails.ValueBytes); - foreach (var item in status.Details) - { - var success = item.TryUnpack(out var retryInfo); - if (success) - { - return retryInfo.RetryDelay.ToTimeSpan(); - } - } - } - - return null; - } - private static TimeSpan? TryGetHttpRetryDelay(HttpStatusCode statusCode, HttpResponseHeaders? responseHeaders) { #if NETSTANDARD2_1_OR_GREATER || NET @@ -258,24 +225,6 @@ private static bool IsGrpcStatusCodeRetryable(StatusCode statusCode, bool hasRet } } - private static bool IsGrpcStatusCodeRetryable(Grpc.StatusCode statusCode, bool hasRetryDelay) - { - switch (statusCode) - { - case Grpc.StatusCode.Cancelled: - case Grpc.StatusCode.DeadlineExceeded: - case Grpc.StatusCode.Aborted: - case Grpc.StatusCode.OutOfRange: - case Grpc.StatusCode.Unavailable: - case Grpc.StatusCode.DataLoss: - return true; - case Grpc.StatusCode.ResourceExhausted: - return hasRetryDelay; - default: - return false; - } - } - private static bool IsHttpStatusCodeRetryable(HttpStatusCode statusCode, bool hasRetryDelay) { switch (statusCode) diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/MetricItemExtensions.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/MetricItemExtensions.cs deleted file mode 100644 index ea014ec5b29..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/MetricItemExtensions.cs +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using System.Collections.Concurrent; -using System.Diagnostics; -using System.Runtime.CompilerServices; -using Google.Protobuf; -using Google.Protobuf.Collections; -using OpenTelemetry.Metrics; -using OpenTelemetry.Proto.Metrics.V1; -using AggregationTemporality = OpenTelemetry.Metrics.AggregationTemporality; -using Metric = OpenTelemetry.Metrics.Metric; -using OtlpCollector = OpenTelemetry.Proto.Collector.Metrics.V1; -using OtlpCommon = OpenTelemetry.Proto.Common.V1; -using OtlpMetrics = OpenTelemetry.Proto.Metrics.V1; -using OtlpResource = OpenTelemetry.Proto.Resource.V1; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; - -internal static class MetricItemExtensions -{ - private static readonly ConcurrentBag MetricListPool = new(); - - internal static void AddMetrics( - this OtlpCollector.ExportMetricsServiceRequest request, - OtlpResource.Resource processResource, - in Batch metrics) - { - var metricsByLibrary = new Dictionary(); - var resourceMetrics = new ResourceMetrics - { - Resource = processResource, - }; - request.ResourceMetrics.Add(resourceMetrics); - - foreach (var metric in metrics) - { - var otlpMetric = metric.ToOtlpMetric(); - - // TODO: Replace null check with exception handling. 
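The deleted MetricItemExtensions built Google.Protobuf message objects (ScopeMetrics, NumberDataPoint, and so on) and let the library serialize them; the replacement path writes the same wire format directly into the outgoing buffer. For orientation, this is roughly what writing one length-delimited protobuf field looks like; the class and method names below are illustrative, not the exporter's actual serializer.

    // Sketch of the protobuf wire format used by direct serialization: a field tag
    // ((fieldNumber << 3) | wireType), a varint length, then the payload bytes.
    internal static class ProtobufWireSketch
    {
        public static int WriteLengthDelimitedField(byte[] buffer, int pos, int fieldNumber, byte[] payload)
        {
            pos = WriteVarint(buffer, pos, (ulong)((fieldNumber << 3) | 2)); // wire type 2 = length-delimited
            pos = WriteVarint(buffer, pos, (ulong)payload.Length);           // length prefix
            Array.Copy(payload, 0, buffer, pos, payload.Length);             // payload bytes
            return pos + payload.Length;
        }

        private static int WriteVarint(byte[] buffer, int pos, ulong value)
        {
            while (value >= 0x80)
            {
                buffer[pos++] = (byte)((value & 0x7F) | 0x80); // low 7 bits plus continuation bit
                value >>= 7;
            }

            buffer[pos++] = (byte)value;
            return pos;
        }
    }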
- if (otlpMetric == null) - { - OpenTelemetryProtocolExporterEventSource.Log.CouldNotTranslateMetric( - nameof(MetricItemExtensions), - nameof(AddMetrics)); - continue; - } - - var meterName = metric.MeterName; - if (!metricsByLibrary.TryGetValue(meterName, out var scopeMetrics)) - { - scopeMetrics = GetMetricListFromPool(meterName, metric.MeterVersion, metric.MeterTags); - - metricsByLibrary.Add(meterName, scopeMetrics); - resourceMetrics.ScopeMetrics.Add(scopeMetrics); - } - - scopeMetrics.Metrics.Add(otlpMetric); - } - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static void Return(this OtlpCollector.ExportMetricsServiceRequest request) - { - var resourceMetrics = request.ResourceMetrics.FirstOrDefault(); - if (resourceMetrics == null) - { - return; - } - - foreach (var scopeMetrics in resourceMetrics.ScopeMetrics) - { - scopeMetrics.Metrics.Clear(); - scopeMetrics.Scope.Attributes.Clear(); - MetricListPool.Add(scopeMetrics); - } - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static ScopeMetrics GetMetricListFromPool(string name, string version, IEnumerable>? meterTags) - { - if (!MetricListPool.TryTake(out var scopeMetrics)) - { - scopeMetrics = new ScopeMetrics - { - Scope = new OtlpCommon.InstrumentationScope - { - Name = name, // Name is enforced to not be null, but it can be empty. - Version = version ?? string.Empty, // NRE throw by proto - }, - }; - - if (meterTags != null) - { - AddScopeAttributes(meterTags, scopeMetrics.Scope.Attributes); - } - } - else - { - scopeMetrics.Scope.Name = name; - scopeMetrics.Scope.Version = version ?? string.Empty; - if (meterTags != null) - { - AddScopeAttributes(meterTags, scopeMetrics.Scope.Attributes); - } - } - - return scopeMetrics; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static OtlpMetrics.Metric ToOtlpMetric(this Metric metric) - { - var otlpMetric = new OtlpMetrics.Metric - { - Name = metric.Name, - }; - - if (metric.Description != null) - { - otlpMetric.Description = metric.Description; - } - - if (metric.Unit != null) - { - otlpMetric.Unit = metric.Unit; - } - - OtlpMetrics.AggregationTemporality temporality; - if (metric.Temporality == AggregationTemporality.Delta) - { - temporality = OtlpMetrics.AggregationTemporality.Delta; - } - else - { - temporality = OtlpMetrics.AggregationTemporality.Cumulative; - } - - switch (metric.MetricType) - { - case MetricType.LongSum: - case MetricType.LongSumNonMonotonic: - { - var sum = new Sum - { - IsMonotonic = metric.MetricType == MetricType.LongSum, - AggregationTemporality = temporality, - }; - - foreach (ref readonly var metricPoint in metric.GetMetricPoints()) - { - var dataPoint = new NumberDataPoint - { - StartTimeUnixNano = (ulong)metricPoint.StartTime.ToUnixTimeNanoseconds(), - TimeUnixNano = (ulong)metricPoint.EndTime.ToUnixTimeNanoseconds(), - }; - - AddAttributes(metricPoint.Tags, dataPoint.Attributes); - - dataPoint.AsInt = metricPoint.GetSumLong(); - - if (metricPoint.TryGetExemplars(out var exemplars)) - { - foreach (ref readonly var exemplar in exemplars) - { - dataPoint.Exemplars.Add( - ToOtlpExemplar(exemplar.LongValue, in exemplar)); - } - } - - sum.DataPoints.Add(dataPoint); - } - - otlpMetric.Sum = sum; - break; - } - - case MetricType.DoubleSum: - case MetricType.DoubleSumNonMonotonic: - { - var sum = new Sum - { - IsMonotonic = metric.MetricType == MetricType.DoubleSum, - AggregationTemporality = temporality, - }; - - foreach (ref readonly var metricPoint in metric.GetMetricPoints()) - { - var 
dataPoint = new NumberDataPoint - { - StartTimeUnixNano = (ulong)metricPoint.StartTime.ToUnixTimeNanoseconds(), - TimeUnixNano = (ulong)metricPoint.EndTime.ToUnixTimeNanoseconds(), - }; - - AddAttributes(metricPoint.Tags, dataPoint.Attributes); - - dataPoint.AsDouble = metricPoint.GetSumDouble(); - - if (metricPoint.TryGetExemplars(out var exemplars)) - { - foreach (ref readonly var exemplar in exemplars) - { - dataPoint.Exemplars.Add( - ToOtlpExemplar(exemplar.DoubleValue, in exemplar)); - } - } - - sum.DataPoints.Add(dataPoint); - } - - otlpMetric.Sum = sum; - break; - } - - case MetricType.LongGauge: - { - var gauge = new Gauge(); - foreach (ref readonly var metricPoint in metric.GetMetricPoints()) - { - var dataPoint = new NumberDataPoint - { - StartTimeUnixNano = (ulong)metricPoint.StartTime.ToUnixTimeNanoseconds(), - TimeUnixNano = (ulong)metricPoint.EndTime.ToUnixTimeNanoseconds(), - }; - - AddAttributes(metricPoint.Tags, dataPoint.Attributes); - - dataPoint.AsInt = metricPoint.GetGaugeLastValueLong(); - - if (metricPoint.TryGetExemplars(out var exemplars)) - { - foreach (ref readonly var exemplar in exemplars) - { - dataPoint.Exemplars.Add( - ToOtlpExemplar(exemplar.LongValue, in exemplar)); - } - } - - gauge.DataPoints.Add(dataPoint); - } - - otlpMetric.Gauge = gauge; - break; - } - - case MetricType.DoubleGauge: - { - var gauge = new Gauge(); - foreach (ref readonly var metricPoint in metric.GetMetricPoints()) - { - var dataPoint = new NumberDataPoint - { - StartTimeUnixNano = (ulong)metricPoint.StartTime.ToUnixTimeNanoseconds(), - TimeUnixNano = (ulong)metricPoint.EndTime.ToUnixTimeNanoseconds(), - }; - - AddAttributes(metricPoint.Tags, dataPoint.Attributes); - - dataPoint.AsDouble = metricPoint.GetGaugeLastValueDouble(); - - if (metricPoint.TryGetExemplars(out var exemplars)) - { - foreach (ref readonly var exemplar in exemplars) - { - dataPoint.Exemplars.Add( - ToOtlpExemplar(exemplar.DoubleValue, in exemplar)); - } - } - - gauge.DataPoints.Add(dataPoint); - } - - otlpMetric.Gauge = gauge; - break; - } - - case MetricType.Histogram: - { - var histogram = new Histogram - { - AggregationTemporality = temporality, - }; - - foreach (ref readonly var metricPoint in metric.GetMetricPoints()) - { - var dataPoint = new HistogramDataPoint - { - StartTimeUnixNano = (ulong)metricPoint.StartTime.ToUnixTimeNanoseconds(), - TimeUnixNano = (ulong)metricPoint.EndTime.ToUnixTimeNanoseconds(), - }; - - AddAttributes(metricPoint.Tags, dataPoint.Attributes); - dataPoint.Count = (ulong)metricPoint.GetHistogramCount(); - dataPoint.Sum = metricPoint.GetHistogramSum(); - - if (metricPoint.TryGetHistogramMinMaxValues(out double min, out double max)) - { - dataPoint.Min = min; - dataPoint.Max = max; - } - - foreach (var histogramMeasurement in metricPoint.GetHistogramBuckets()) - { - dataPoint.BucketCounts.Add((ulong)histogramMeasurement.BucketCount); - if (histogramMeasurement.ExplicitBound != double.PositiveInfinity) - { - dataPoint.ExplicitBounds.Add(histogramMeasurement.ExplicitBound); - } - } - - if (metricPoint.TryGetExemplars(out var exemplars)) - { - foreach (ref readonly var exemplar in exemplars) - { - dataPoint.Exemplars.Add( - ToOtlpExemplar(exemplar.DoubleValue, in exemplar)); - } - } - - histogram.DataPoints.Add(dataPoint); - } - - otlpMetric.Histogram = histogram; - break; - } - - case MetricType.ExponentialHistogram: - { - var histogram = new ExponentialHistogram - { - AggregationTemporality = temporality, - }; - - foreach (ref readonly var metricPoint in metric.GetMetricPoints()) - { - 
var dataPoint = new ExponentialHistogramDataPoint - { - StartTimeUnixNano = (ulong)metricPoint.StartTime.ToUnixTimeNanoseconds(), - TimeUnixNano = (ulong)metricPoint.EndTime.ToUnixTimeNanoseconds(), - }; - - AddAttributes(metricPoint.Tags, dataPoint.Attributes); - dataPoint.Count = (ulong)metricPoint.GetHistogramCount(); - dataPoint.Sum = metricPoint.GetHistogramSum(); - - if (metricPoint.TryGetHistogramMinMaxValues(out double min, out double max)) - { - dataPoint.Min = min; - dataPoint.Max = max; - } - - var exponentialHistogramData = metricPoint.GetExponentialHistogramData(); - dataPoint.Scale = exponentialHistogramData.Scale; - dataPoint.ZeroCount = (ulong)exponentialHistogramData.ZeroCount; - - dataPoint.Positive = new ExponentialHistogramDataPoint.Types.Buckets(); - dataPoint.Positive.Offset = exponentialHistogramData.PositiveBuckets.Offset; - foreach (var bucketCount in exponentialHistogramData.PositiveBuckets) - { - dataPoint.Positive.BucketCounts.Add((ulong)bucketCount); - } - - if (metricPoint.TryGetExemplars(out var exemplars)) - { - foreach (ref readonly var exemplar in exemplars) - { - dataPoint.Exemplars.Add( - ToOtlpExemplar(exemplar.DoubleValue, in exemplar)); - } - } - - histogram.DataPoints.Add(dataPoint); - } - - otlpMetric.ExponentialHistogram = histogram; - break; - } - } - - return otlpMetric; - } - - internal static OtlpMetrics.Exemplar ToOtlpExemplar(T value, in Metrics.Exemplar exemplar) - where T : struct - { - var otlpExemplar = new OtlpMetrics.Exemplar - { - TimeUnixNano = (ulong)exemplar.Timestamp.ToUnixTimeNanoseconds(), - }; - - if (exemplar.TraceId != default) - { - byte[] traceIdBytes = new byte[16]; - exemplar.TraceId.CopyTo(traceIdBytes); - - byte[] spanIdBytes = new byte[8]; - exemplar.SpanId.CopyTo(spanIdBytes); - - otlpExemplar.TraceId = UnsafeByteOperations.UnsafeWrap(traceIdBytes); - otlpExemplar.SpanId = UnsafeByteOperations.UnsafeWrap(spanIdBytes); - } - - if (typeof(T) == typeof(long)) - { - otlpExemplar.AsInt = (long)(object)value; - } - else if (typeof(T) == typeof(double)) - { - otlpExemplar.AsDouble = (double)(object)value; - } - else - { - Debug.Fail("Unexpected type"); - otlpExemplar.AsDouble = Convert.ToDouble(value); - } - - var otlpExemplarFilteredAttributes = otlpExemplar.FilteredAttributes; - - foreach (var tag in exemplar.FilteredTags) - { - OtlpTagWriter.Instance.TryWriteTag(ref otlpExemplarFilteredAttributes, tag); - } - - return otlpExemplar; - } - - private static void AddAttributes(ReadOnlyTagCollection tags, RepeatedField attributes) - { - foreach (var tag in tags) - { - OtlpTagWriter.Instance.TryWriteTag(ref attributes, tag); - } - } - - private static void AddScopeAttributes(IEnumerable> meterTags, RepeatedField attributes) - { - foreach (var tag in meterTags) - { - OtlpTagWriter.Instance.TryWriteTag(ref attributes, tag); - } - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/OtlpTagWriter.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/OtlpTagWriter.cs deleted file mode 100644 index c713d4b646e..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/OtlpTagWriter.cs +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using Google.Protobuf.Collections; -using OpenTelemetry.Internal; -using OtlpCommon = OpenTelemetry.Proto.Common.V1; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; - -internal sealed class OtlpTagWriter : TagWriter, OtlpCommon.ArrayValue> -{ - 
private OtlpTagWriter() - : base(new OtlpArrayTagWriter()) - { - } - - public static OtlpTagWriter Instance { get; } = new(); - - internal static OtlpCommon.AnyValue ToAnyValue(long value) - => new() { IntValue = value }; - - internal static OtlpCommon.AnyValue ToAnyValue(double value) - => new() { DoubleValue = value }; - - internal static OtlpCommon.AnyValue ToAnyValue(bool value) - => new() { BoolValue = value }; - - internal static OtlpCommon.AnyValue ToAnyValue(string value) - => new() { StringValue = value }; - - protected override void WriteIntegralTag(ref RepeatedField tags, string key, long value) - { - tags.Add(new OtlpCommon.KeyValue { Key = key, Value = ToAnyValue(value) }); - } - - protected override void WriteFloatingPointTag(ref RepeatedField tags, string key, double value) - { - tags.Add(new OtlpCommon.KeyValue { Key = key, Value = ToAnyValue(value) }); - } - - protected override void WriteBooleanTag(ref RepeatedField tags, string key, bool value) - { - tags.Add(new OtlpCommon.KeyValue { Key = key, Value = ToAnyValue(value) }); - } - - protected override void WriteStringTag(ref RepeatedField tags, string key, ReadOnlySpan value) - { - tags.Add(new OtlpCommon.KeyValue { Key = key, Value = ToAnyValue(value.ToString()) }); - } - - protected override void WriteArrayTag(ref RepeatedField tags, string key, ref OtlpCommon.ArrayValue value) - { - tags.Add(new OtlpCommon.KeyValue - { - Key = key, - Value = new OtlpCommon.AnyValue - { - ArrayValue = value, - }, - }); - } - - protected override void OnUnsupportedTagDropped( - string tagKey, - string tagValueTypeFullName) - { - OpenTelemetryProtocolExporterEventSource.Log.UnsupportedAttributeType( - tagValueTypeFullName, - tagKey); - } - - private sealed class OtlpArrayTagWriter : ArrayTagWriter - { - public override OtlpCommon.ArrayValue BeginWriteArray() => new(); - - public override void WriteNullValue(ref OtlpCommon.ArrayValue array) - { - array.Values.Add(new OtlpCommon.AnyValue()); - } - - public override void WriteIntegralValue(ref OtlpCommon.ArrayValue array, long value) - { - array.Values.Add(ToAnyValue(value)); - } - - public override void WriteFloatingPointValue(ref OtlpCommon.ArrayValue array, double value) - { - array.Values.Add(ToAnyValue(value)); - } - - public override void WriteBooleanValue(ref OtlpCommon.ArrayValue array, bool value) - { - array.Values.Add(ToAnyValue(value)); - } - - public override void WriteStringValue(ref OtlpCommon.ArrayValue array, ReadOnlySpan value) - { - array.Values.Add(ToAnyValue(value.ToString())); - } - - public override void EndWriteArray(ref OtlpCommon.ArrayValue array) - { - } - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ResourceExtensions.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ResourceExtensions.cs deleted file mode 100644 index f114f072972..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/ResourceExtensions.cs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using OpenTelemetry.Resources; -using OtlpCommon = OpenTelemetry.Proto.Common.V1; -using OtlpResource = OpenTelemetry.Proto.Resource.V1; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; - -internal static class ResourceExtensions -{ - public static OtlpResource.Resource ToOtlpResource(this Resource resource) - { - var processResource = new OtlpResource.Resource(); - - var processResourceAttributes = processResource.Attributes; - - foreach 
(KeyValuePair attribute in resource.Attributes) - { - OtlpTagWriter.Instance.TryWriteTag(ref processResourceAttributes, attribute.Key, attribute.Value); - } - - if (!processResource.Attributes.Any(kvp => kvp.Key == ResourceSemanticConventions.AttributeServiceName)) - { - var serviceName = (string)ResourceBuilder.CreateDefault().Build().Attributes.FirstOrDefault( - kvp => kvp.Key == ResourceSemanticConventions.AttributeServiceName).Value; - processResource.Attributes.Add(new OtlpCommon.KeyValue - { - Key = ResourceSemanticConventions.AttributeServiceName, - Value = new OtlpCommon.AnyValue { StringValue = serviceName }, - }); - } - - return processResource; - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterPersistentStorageTransmissionHandler.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterPersistentStorageTransmissionHandler.cs index 199f1510b2f..df75750d9d6 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterPersistentStorageTransmissionHandler.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterPersistentStorageTransmissionHandler.cs @@ -2,17 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 using System.Diagnostics; -using Google.Protobuf; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; using OpenTelemetry.PersistentStorage.Abstractions; using OpenTelemetry.PersistentStorage.FileSystem; -using OpenTelemetry.Proto.Collector.Logs.V1; -using OpenTelemetry.Proto.Collector.Metrics.V1; -using OpenTelemetry.Proto.Collector.Trace.V1; namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; -internal sealed class OtlpExporterPersistentStorageTransmissionHandler : OtlpExporterTransmissionHandler, IDisposable +internal sealed class OtlpExporterPersistentStorageTransmissionHandler : OtlpExporterTransmissionHandler, IDisposable { private const int RetryIntervalInMilliseconds = 60000; private readonly ManualResetEvent shutdownEvent = new(false); @@ -20,26 +16,22 @@ internal sealed class OtlpExporterPersistentStorageTransmissionHandler private readonly AutoResetEvent exportEvent = new(false); private readonly Thread thread; private readonly PersistentBlobProvider persistentBlobProvider; - private readonly Func requestFactory; private bool disposed; - public OtlpExporterPersistentStorageTransmissionHandler(IExportClient exportClient, double timeoutMilliseconds, Func requestFactory, string storagePath) - : this(new FileBlobProvider(storagePath), exportClient, timeoutMilliseconds, requestFactory) + public OtlpExporterPersistentStorageTransmissionHandler(IExportClient exportClient, double timeoutMilliseconds, string storagePath) + : this(new FileBlobProvider(storagePath), exportClient, timeoutMilliseconds) { } - internal OtlpExporterPersistentStorageTransmissionHandler(PersistentBlobProvider persistentBlobProvider, IExportClient exportClient, double timeoutMilliseconds, Func requestFactory) + internal OtlpExporterPersistentStorageTransmissionHandler(PersistentBlobProvider persistentBlobProvider, IExportClient exportClient, double timeoutMilliseconds) : base(exportClient, timeoutMilliseconds) { Debug.Assert(persistentBlobProvider != null, "persistentBlobProvider was null"); - Debug.Assert(requestFactory != null, "requestFactory was null"); - this.persistentBlobProvider = persistentBlobProvider!; - this.requestFactory = requestFactory!; this.thread = new 
Thread(this.RetryStoredRequests) { - Name = $"OtlpExporter Persistent Retry Storage - {typeof(TRequest)}", + Name = $"OtlpExporter Persistent Retry Storage", IsBackground = true, }; @@ -54,36 +46,10 @@ internal bool InitiateAndWaitForRetryProcess(int timeOutMilliseconds) return this.dataExportNotification.WaitOne(timeOutMilliseconds); } - protected override bool OnSubmitRequestFailure(TRequest request, ExportClientResponse response) + protected override bool OnSubmitRequestFailure(byte[] request, int contentLength, ExportClientResponse response) { - if (RetryHelper.ShouldRetryRequest(response, OtlpRetry.InitialBackoffMilliseconds, out _)) - { - byte[]? data = null; - if (request is ExportTraceServiceRequest traceRequest) - { - data = traceRequest.ToByteArray(); - } - else if (request is ExportMetricsServiceRequest metricsRequest) - { - data = metricsRequest.ToByteArray(); - } - else if (request is ExportLogsServiceRequest logsRequest) - { - data = logsRequest.ToByteArray(); - } - else - { - Debug.Fail("Unexpected request type encountered"); - data = null; - } - - if (data != null) - { - return this.persistentBlobProvider.TryCreateBlob(data, out _); - } - } - - return false; + Debug.Assert(request != null, "request was null"); + return RetryHelper.ShouldRetryRequest(response, OtlpRetry.InitialBackoffMilliseconds, out _) && this.persistentBlobProvider.TryCreateBlob(request!, out _); } protected override void OnShutdown(int timeoutMilliseconds) @@ -157,9 +123,7 @@ private void RetryStoredRequests() if (blob.TryLease((int)this.TimeoutMilliseconds) && blob.TryRead(out var data)) { var deadlineUtc = DateTime.UtcNow.AddMilliseconds(this.TimeoutMilliseconds); - var request = this.requestFactory.Invoke(data); - if (this.TryRetryRequest(request, deadlineUtc, out var response) - || !RetryHelper.ShouldRetryRequest(response, OtlpRetry.InitialBackoffMilliseconds, out var retryInfo)) + if (this.TryRetryRequest(data, data.Length, deadlineUtc, out var response) || !RetryHelper.ShouldRetryRequest(response, OtlpRetry.InitialBackoffMilliseconds, out var retryInfo)) { blob.TryDelete(); } diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterRetryTransmissionHandler.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterRetryTransmissionHandler.cs index df54fb5f78b..6830288525e 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterRetryTransmissionHandler.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterRetryTransmissionHandler.cs @@ -5,14 +5,14 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; -internal sealed class OtlpExporterRetryTransmissionHandler : OtlpExporterTransmissionHandler +internal sealed class OtlpExporterRetryTransmissionHandler : OtlpExporterTransmissionHandler { - internal OtlpExporterRetryTransmissionHandler(IExportClient exportClient, double timeoutMilliseconds) + internal OtlpExporterRetryTransmissionHandler(IExportClient exportClient, double timeoutMilliseconds) : base(exportClient, timeoutMilliseconds) { } - protected override bool OnSubmitRequestFailure(TRequest request, ExportClientResponse response) + protected override bool OnSubmitRequestFailure(byte[] request, int contentLength, ExportClientResponse response) { var nextRetryDelayMilliseconds = OtlpRetry.InitialBackoffMilliseconds; while (RetryHelper.ShouldRetryRequest(response, nextRetryDelayMilliseconds, out var 
retryResult)) @@ -22,7 +22,7 @@ protected override bool OnSubmitRequestFailure(TRequest request, ExportClientRes // we would fail fast and drop the data. Thread.Sleep(retryResult.RetryDelay); - if (this.TryRetryRequest(request, response.DeadlineUtc, out response)) + if (this.TryRetryRequest(request, contentLength, response.DeadlineUtc, out response)) { return true; } diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterTransmissionHandler.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterTransmissionHandler.cs index 9ecb6c4785f..b63175ee502 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterTransmissionHandler.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterTransmissionHandler.cs @@ -7,9 +7,9 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; -internal class OtlpExporterTransmissionHandler : IDisposable +internal class OtlpExporterTransmissionHandler : IDisposable { - public OtlpExporterTransmissionHandler(IExportClient exportClient, double timeoutMilliseconds) + public OtlpExporterTransmissionHandler(IExportClient exportClient, double timeoutMilliseconds) { Guard.ThrowIfNull(exportClient); @@ -17,7 +17,7 @@ public OtlpExporterTransmissionHandler(IExportClient exportClient, dou this.TimeoutMilliseconds = timeoutMilliseconds; } - internal IExportClient ExportClient { get; } + internal IExportClient ExportClient { get; } internal double TimeoutMilliseconds { get; } @@ -25,21 +25,22 @@ public OtlpExporterTransmissionHandler(IExportClient exportClient, dou /// Attempts to send an export request to the server. /// /// The request to send to the server. + /// length of content. /// if the request is sent successfully; otherwise, . /// - public bool TrySubmitRequest(TRequest request) + public bool TrySubmitRequest(byte[] request, int contentLength) { try { var deadlineUtc = DateTime.UtcNow.AddMilliseconds(this.TimeoutMilliseconds); - var response = this.ExportClient.SendExportRequest(request, deadlineUtc); + var response = this.ExportClient.SendExportRequest(request, contentLength, deadlineUtc); if (response.Success) { return true; } - return this.OnSubmitRequestFailure(request, response); + return this.OnSubmitRequestFailure(request, contentLength, response); } catch (Exception ex) { @@ -100,32 +101,25 @@ protected virtual void OnShutdown(int timeoutMilliseconds) /// Fired when a request could not be submitted. /// /// The request that was attempted to send to the server. + /// Length of content. /// . /// If the request is resubmitted and succeeds; otherwise, . - protected virtual bool OnSubmitRequestFailure(TRequest request, ExportClientResponse response) - { - return false; - } + protected virtual bool OnSubmitRequestFailure(byte[] request, int contentLength, ExportClientResponse response) => false; /// /// Fired when resending a request to the server. /// /// The request to be resent to the server. + /// Length of content. /// The deadline time in utc for export request to finish. /// . /// If the retry succeeds; otherwise, . 
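The hunks above move the transmission handlers from typed protobuf request messages to a raw buffer plus an explicit content length (`TrySubmitRequest(byte[] request, int contentLength)` and `OnSubmitRequestFailure(byte[] request, int contentLength, ...)`). A minimal stand-alone sketch of that calling convention follows; every name in it is invented for illustration and none of it is repository code:

```csharp
// Stand-alone illustration of the byte[] + contentLength contract; all names
// here are invented for the example and nothing below is repository code.
using System;

internal static class BufferContractDemo
{
    // Same shape as TrySubmitRequest(byte[] request, int contentLength):
    // only the first contentLength bytes of the (possibly oversized) buffer
    // are meaningful payload.
    private static bool TrySubmitRequest(byte[] request, int contentLength)
    {
        var payload = new ReadOnlySpan<byte>(request, 0, contentLength);
        Console.WriteLine($"would transmit {payload.Length} of {request.Length} buffered bytes");
        return true;
    }

    public static void Main()
    {
        byte[] buffer = new byte[4096];   // serialization buffer, larger than the payload
        int writePosition = 128;          // bytes actually written by a serializer
        Console.WriteLine(TrySubmitRequest(buffer, writePosition));
    }
}
```

Passing the length separately from the array presumably lets a serializer hand over pooled or oversized buffers without trimming them first, since only the first `contentLength` bytes are treated as the request.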
- protected bool TryRetryRequest(TRequest request, DateTime deadlineUtc, out ExportClientResponse response) + protected bool TryRetryRequest(byte[] request, int contentLength, DateTime deadlineUtc, out ExportClientResponse response) { - response = this.ExportClient.SendExportRequest(request, deadlineUtc); - if (!response.Success) - { - OpenTelemetryProtocolExporterEventSource.Log.ExportMethodException(response.Exception!, isRetry: true); - return false; - } - - return true; + response = this.ExportClient.SendExportRequest(request, contentLength, deadlineUtc); + return response.Success; } /// diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterPersistentStorageTransmissionHandler.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterPersistentStorageTransmissionHandler.cs deleted file mode 100644 index b3a719aa3d5..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterPersistentStorageTransmissionHandler.cs +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using System.Diagnostics; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; -using OpenTelemetry.PersistentStorage.Abstractions; -using OpenTelemetry.PersistentStorage.FileSystem; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; - -internal sealed class ProtobufOtlpExporterPersistentStorageTransmissionHandler : ProtobufOtlpExporterTransmissionHandler, IDisposable -{ - private const int RetryIntervalInMilliseconds = 60000; - private readonly ManualResetEvent shutdownEvent = new(false); - private readonly ManualResetEvent dataExportNotification = new(false); - private readonly AutoResetEvent exportEvent = new(false); - private readonly Thread thread; - private readonly PersistentBlobProvider persistentBlobProvider; - private bool disposed; - - public ProtobufOtlpExporterPersistentStorageTransmissionHandler(IProtobufExportClient exportClient, double timeoutMilliseconds, string storagePath) - : this(new FileBlobProvider(storagePath), exportClient, timeoutMilliseconds) - { - } - - internal ProtobufOtlpExporterPersistentStorageTransmissionHandler(PersistentBlobProvider persistentBlobProvider, IProtobufExportClient exportClient, double timeoutMilliseconds) - : base(exportClient, timeoutMilliseconds) - { - Debug.Assert(persistentBlobProvider != null, "persistentBlobProvider was null"); - this.persistentBlobProvider = persistentBlobProvider!; - - this.thread = new Thread(this.RetryStoredRequests) - { - Name = $"OtlpExporter Persistent Retry Storage", - IsBackground = true, - }; - - this.thread.Start(); - } - - // Used for test. - internal bool InitiateAndWaitForRetryProcess(int timeOutMilliseconds) - { - this.exportEvent.Set(); - - return this.dataExportNotification.WaitOne(timeOutMilliseconds); - } - - protected override bool OnSubmitRequestFailure(byte[] request, int contentLength, ExportClientResponse response) - { - Debug.Assert(request != null, "request was null"); - return RetryHelper.ShouldRetryRequest(response, OtlpRetry.InitialBackoffMilliseconds, out _) && this.persistentBlobProvider.TryCreateBlob(request!, out _); - } - - protected override void OnShutdown(int timeoutMilliseconds) - { - var sw = timeoutMilliseconds == Timeout.Infinite ? 
null : Stopwatch.StartNew(); - - try - { - this.shutdownEvent.Set(); - } - catch (ObjectDisposedException) - { - // Dispose was called before shutdown. - } - - this.thread.Join(timeoutMilliseconds); - - if (sw != null) - { - var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds; - - base.OnShutdown((int)Math.Max(timeout, 0)); - } - else - { - base.OnShutdown(timeoutMilliseconds); - } - } - - protected override void Dispose(bool disposing) - { - if (!this.disposed) - { - if (disposing) - { - this.shutdownEvent.Dispose(); - this.exportEvent.Dispose(); - this.dataExportNotification.Dispose(); - (this.persistentBlobProvider as IDisposable)?.Dispose(); - } - - this.disposed = true; - } - } - - private void RetryStoredRequests() - { - var handles = new WaitHandle[] { this.shutdownEvent, this.exportEvent }; - while (true) - { - try - { - var index = WaitHandle.WaitAny(handles, RetryIntervalInMilliseconds); - if (index == 0) - { - // Shutdown signaled - break; - } - - int fileCount = 0; - - // TODO: Run maintenance job. - // Transmit 10 files at a time. - while (fileCount < 10 && !this.shutdownEvent.WaitOne(0)) - { - if (!this.persistentBlobProvider.TryGetBlob(out var blob)) - { - break; - } - - if (blob.TryLease((int)this.TimeoutMilliseconds) && blob.TryRead(out var data)) - { - var deadlineUtc = DateTime.UtcNow.AddMilliseconds(this.TimeoutMilliseconds); - if (this.TryRetryRequest(data, data.Length, deadlineUtc, out var response) || !RetryHelper.ShouldRetryRequest(response, OtlpRetry.InitialBackoffMilliseconds, out var retryInfo)) - { - blob.TryDelete(); - } - - // TODO: extend the lease period based on the response from server on retryAfter. - } - - fileCount++; - } - - // Set and reset the handle to notify export and wait for next signal. - // This is used for InitiateAndWaitForRetryProcess. - this.dataExportNotification.Set(); - this.dataExportNotification.Reset(); - } - catch (Exception ex) - { - OpenTelemetryProtocolExporterEventSource.Log.RetryStoredRequestException(ex); - return; - } - } - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterRetryTransmissionHandler.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterRetryTransmissionHandler.cs deleted file mode 100644 index c1d5515a53f..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterRetryTransmissionHandler.cs +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; - -internal sealed class ProtobufOtlpExporterRetryTransmissionHandler : ProtobufOtlpExporterTransmissionHandler -{ - internal ProtobufOtlpExporterRetryTransmissionHandler(IProtobufExportClient exportClient, double timeoutMilliseconds) - : base(exportClient, timeoutMilliseconds) - { - } - - protected override bool OnSubmitRequestFailure(byte[] request, int contentLength, ExportClientResponse response) - { - var nextRetryDelayMilliseconds = OtlpRetry.InitialBackoffMilliseconds; - while (RetryHelper.ShouldRetryRequest(response, nextRetryDelayMilliseconds, out var retryResult)) - { - // Note: This delay cannot exceed the configured timeout period for otlp exporter. 
- // If the backend responds with `RetryAfter` duration that would result in exceeding the configured timeout period - // we would fail fast and drop the data. - Thread.Sleep(retryResult.RetryDelay); - - if (this.TryRetryRequest(request, contentLength, response.DeadlineUtc, out response)) - { - return true; - } - - nextRetryDelayMilliseconds = retryResult.NextRetryDelayMilliseconds; - } - - return false; - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterTransmissionHandler.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterTransmissionHandler.cs deleted file mode 100644 index 70dad49f9cd..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/ProtobufOtlpExporterTransmissionHandler.cs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using System.Diagnostics; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; -using OpenTelemetry.Internal; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; - -internal class ProtobufOtlpExporterTransmissionHandler : IDisposable -{ - public ProtobufOtlpExporterTransmissionHandler(IProtobufExportClient exportClient, double timeoutMilliseconds) - { - Guard.ThrowIfNull(exportClient); - - this.ExportClient = exportClient; - this.TimeoutMilliseconds = timeoutMilliseconds; - } - - internal IProtobufExportClient ExportClient { get; } - - internal double TimeoutMilliseconds { get; } - - /// - /// Attempts to send an export request to the server. - /// - /// The request to send to the server. - /// length of content. - /// if the request is sent successfully; otherwise, . - /// - public bool TrySubmitRequest(byte[] request, int contentLength) - { - try - { - var deadlineUtc = DateTime.UtcNow.AddMilliseconds(this.TimeoutMilliseconds); - var response = this.ExportClient.SendExportRequest(request, contentLength, deadlineUtc); - if (response.Success) - { - return true; - } - - return this.OnSubmitRequestFailure(request, contentLength, response); - } - catch (Exception ex) - { - OpenTelemetryProtocolExporterEventSource.Log.TrySubmitRequestException(ex); - return false; - } - } - - /// - /// Attempts to shutdown the transmission handler, blocks the current thread - /// until shutdown completed or timed out. - /// - /// - /// The number (non-negative) of milliseconds to wait, or - /// Timeout.Infinite to wait indefinitely. - /// - /// - /// Returns if shutdown succeeded; otherwise, . - /// - public bool Shutdown(int timeoutMilliseconds) - { - Guard.ThrowIfInvalidTimeout(timeoutMilliseconds); - - var sw = timeoutMilliseconds == Timeout.Infinite ? null : Stopwatch.StartNew(); - - this.OnShutdown(timeoutMilliseconds); - - if (sw != null) - { - var timeout = timeoutMilliseconds - sw.ElapsedMilliseconds; - - return this.ExportClient.Shutdown((int)Math.Max(timeout, 0)); - } - - return this.ExportClient.Shutdown(timeoutMilliseconds); - } - - /// - public void Dispose() - { - this.Dispose(true); - GC.SuppressFinalize(this); - } - - /// - /// Fired when the transmission handler is shutdown. - /// - /// - /// The number (non-negative) of milliseconds to wait, or - /// Timeout.Infinite to wait indefinitely. - /// - protected virtual void OnShutdown(int timeoutMilliseconds) - { - } - - /// - /// Fired when a request could not be submitted. 
- /// - /// The request that was attempted to send to the server. - /// Length of content. - /// . - /// If the request is resubmitted and succeeds; otherwise, . - protected virtual bool OnSubmitRequestFailure(byte[] request, int contentLength, ExportClientResponse response) => false; - - /// - /// Fired when resending a request to the server. - /// - /// The request to be resent to the server. - /// Length of content. - /// The deadline time in utc for export request to finish. - /// . - /// If the retry succeeds; otherwise, . - protected bool TryRetryRequest(byte[] request, int contentLength, DateTime deadlineUtc, out ExportClientResponse response) - { - response = this.ExportClient.SendExportRequest(request, contentLength, deadlineUtc); - return response.Success; - } - - /// - /// Releases the unmanaged resources used by this class and optionally - /// releases the managed resources. - /// - /// - /// to release both managed and unmanaged resources; - /// to release only unmanaged resources. - /// - protected virtual void Dispose(bool disposing) - { - } -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/google/rpc/error_details.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/google/rpc/error_details.proto deleted file mode 100644 index 0d5461a7287..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/google/rpc/error_details.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.rpc; - -import "google/protobuf/duration.proto"; - -// Describes when the clients can retry a failed request. Clients could ignore -// the recommendation here or retry when this information is missing from error -// responses. -// -// It's always recommended that clients should use exponential backoff when -// retrying. -// -// Clients should wait until `retry_delay` amount of time has passed since -// receiving the error response before retrying. If retrying requests also -// fail, clients should use an exponential backoff scheme to gradually increase -// the delay between retries based on `retry_delay`, until either a maximum -// number of retries have been reached or a maximum retry delay cap has been -// reached. -message RetryInfo { - // Clients should wait at least this long between retrying the same request. - google.protobuf.Duration retry_delay = 1; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/google/rpc/status.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/google/rpc/status.proto deleted file mode 100644 index 40c5bf318c2..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/google/rpc/status.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.rpc; - -import "google/protobuf/any.proto"; - -// The `Status` type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). Each `Status` message contains -// three pieces of data: error code, error message, and error details. -// -// You can find out more about this error model and how to work with it in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). -message Status { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. - int32 code = 1; - - // A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. - string message = 2; - - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. - repeated google.protobuf.Any details = 3; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/README.md b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/README.md deleted file mode 100644 index f82dbb0278b..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# OpenTelemetry Collector Proto - -This package describes the OpenTelemetry collector protocol. - -## Packages - -1. `common` package contains the common messages shared between different services. -2. `trace` package contains the Trace Service protos. -3. `metrics` package contains the Metrics Service protos. -4. `logs` package contains the Logs Service protos. diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto deleted file mode 100644 index 8260d8aaeb8..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
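The `RetryInfo` and `Status` messages being deleted above are the standard google.rpc types a gRPC server can use to suggest a retry delay on failure, which is the kind of hint the retry helpers referenced earlier in this patch (`RetryHelper`, `OtlpRetry`) act on. The sketch below shows only the general trailer-decoding mechanism; it assumes `Google.Rpc.Status` and `Google.Rpc.RetryInfo` generated types (for example from the Google.Api.CommonProtos package) and is not code from this repository:

```csharp
// Illustrative sketch of reading a server-suggested retry delay from the
// rich-error-model trailer of a failed gRPC call. Assumes Google.Rpc.Status
// and Google.Rpc.RetryInfo generated types; not code from this patch.
using System;
using Google.Protobuf.WellKnownTypes;
using Grpc.Core;

internal static class RetryInfoDemo
{
    public static TimeSpan? TryGetRetryDelay(RpcException ex)
    {
        foreach (Metadata.Entry entry in ex.Trailers)
        {
            if (!string.Equals(entry.Key, "grpc-status-details-bin", StringComparison.OrdinalIgnoreCase))
            {
                continue;
            }

            var status = Google.Rpc.Status.Parser.ParseFrom(entry.ValueBytes);
            foreach (Any detail in status.Details)
            {
                if (detail.Is(Google.Rpc.RetryInfo.Descriptor))
                {
                    // retry_delay is a google.protobuf.Duration.
                    return detail.Unpack<Google.Rpc.RetryInfo>().RetryDelay?.ToTimeSpan();
                }
            }
        }

        return null;
    }
}
```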
- -syntax = "proto3"; - -package opentelemetry.proto.collector.logs.v1; - -import "opentelemetry/proto/logs/v1/logs.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Logs.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.logs.v1"; -option java_outer_classname = "LogsServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/logs/v1"; - -// Service that can be used to push logs between one Application instrumented with -// OpenTelemetry and an collector, or between an collector and a central collector (in this -// case logs are sent/received to/from multiple Applications). -service LogsService { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - rpc Export(ExportLogsServiceRequest) returns (ExportLogsServiceResponse) {} -} - -message ExportLogsServiceRequest { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.logs.v1.ResourceLogs resource_logs = 1; -} - -message ExportLogsServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportLogsPartialSuccess partial_success = 1; -} - -message ExportLogsPartialSuccess { - // The number of rejected log records. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_log_records = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml deleted file mode 100644 index 507473b9b3b..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the -# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. 
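The `partial_success` contract spelled out in the deleted `logs_service.proto` above is easy to overlook, so here is a short sketch of how a caller might surface it, assuming the standard C# codegen for the `OpenTelemetry.Proto.Collector.Logs.V1` messages shown above (illustrative only, not repository code):

```csharp
// Sketch only: reading the partial_success block of ExportLogsServiceResponse,
// assuming standard protobuf C# codegen for the messages defined above.
using System;
using OpenTelemetry.Proto.Collector.Logs.V1;

internal static class PartialSuccessDemo
{
    // An unset partial_success (or rejected_log_records = 0 with an empty
    // error_message) is equivalent to full success per the proto comments.
    public static void ReportPartialSuccess(ExportLogsServiceResponse response)
    {
        var partial = response.PartialSuccess;
        if (partial != null && (partial.RejectedLogRecords > 0 || partial.ErrorMessage.Length > 0))
        {
            Console.WriteLine(
                $"Collector rejected {partial.RejectedLogRecords} log record(s): {partial.ErrorMessage}");
        }
    }
}
```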
-type: google.api.Service -config_version: 3 -http: - rules: - - selector: opentelemetry.proto.collector.logs.v1.LogsService.Export - post: /v1/logs - body: "*" \ No newline at end of file diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto deleted file mode 100644 index dd48f1ad3a1..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.metrics.v1; - -import "opentelemetry/proto/metrics/v1/metrics.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Metrics.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.metrics.v1"; -option java_outer_classname = "MetricsServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/metrics/v1"; - -// Service that can be used to push metrics between one Application -// instrumented with OpenTelemetry and a collector, or between a collector and a -// central collector. -service MetricsService { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - rpc Export(ExportMetricsServiceRequest) returns (ExportMetricsServiceResponse) {} -} - -message ExportMetricsServiceRequest { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.metrics.v1.ResourceMetrics resource_metrics = 1; -} - -message ExportMetricsServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. 
- ExportMetricsPartialSuccess partial_success = 1; -} - -message ExportMetricsPartialSuccess { - // The number of rejected data points. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_data_points = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml deleted file mode 100644 index a5456502607..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the -# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. -type: google.api.Service -config_version: 3 -http: - rules: - - selector: opentelemetry.proto.collector.metrics.v1.MetricsService.Export - post: /v1/metrics - body: "*" \ No newline at end of file diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto deleted file mode 100644 index d0e7894b29c..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.profiles.v1experimental; - -import "opentelemetry/proto/profiles/v1experimental/profiles.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Experimental"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.profiles.v1experimental"; -option java_outer_classname = "ProfilesServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental"; - -// Service that can be used to push profiles between one Application instrumented with -// OpenTelemetry and a collector, or between a collector and a central collector. -service ProfilesService { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- rpc Export(ExportProfilesServiceRequest) returns (ExportProfilesServiceResponse) {} -} - -message ExportProfilesServiceRequest { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.profiles.v1experimental.ResourceProfiles resource_profiles = 1; -} - -message ExportProfilesServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportProfilesPartialSuccess partial_success = 1; -} - -message ExportProfilesPartialSuccess { - // The number of rejected profiles. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_profiles = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml deleted file mode 100644 index ea501afbbb2..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the -# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. 
-type: google.api.Service -config_version: 3 -http: - rules: - - selector: opentelemetry.proto.collector.profiles.v1experimental.ProfilesService.Export - post: /v1experimental/profiles - body: "*" diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto deleted file mode 100644 index d6fe67f9e55..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.trace.v1; - -import "opentelemetry/proto/trace/v1/trace.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Trace.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.trace.v1"; -option java_outer_classname = "TraceServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/trace/v1"; - -// Service that can be used to push spans between one Application instrumented with -// OpenTelemetry and a collector, or between a collector and a central collector (in this -// case spans are sent/received to/from multiple Applications). -service TraceService { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - rpc Export(ExportTraceServiceRequest) returns (ExportTraceServiceResponse) {} -} - -message ExportTraceServiceRequest { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.trace.v1.ResourceSpans resource_spans = 1; -} - -message ExportTraceServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. 
- ExportTracePartialSuccess partial_success = 1; -} - -message ExportTracePartialSuccess { - // The number of rejected spans. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_spans = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml deleted file mode 100644 index d091b3a8d53..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the -# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. -type: google.api.Service -config_version: 3 -http: - rules: - - selector: opentelemetry.proto.collector.trace.v1.TraceService.Export - post: /v1/traces - body: "*" diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/common/v1/common.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/common/v1/common.proto deleted file mode 100644 index ff8a21a1fa0..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/common/v1/common.proto +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.common.v1; - -option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.common.v1"; -option java_outer_classname = "CommonProto"; -option go_package = "go.opentelemetry.io/proto/otlp/common/v1"; - -// AnyValue is used to represent any type of attribute value. AnyValue may contain a -// primitive value such as a string or integer or it may contain an arbitrary nested -// object containing arrays, key-value lists and primitives. -message AnyValue { - // The value is one of the listed fields. It is valid for all values to be unspecified - // in which case this AnyValue is considered to be "empty". 
- oneof value { - string string_value = 1; - bool bool_value = 2; - int64 int_value = 3; - double double_value = 4; - ArrayValue array_value = 5; - KeyValueList kvlist_value = 6; - bytes bytes_value = 7; - } -} - -// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message -// since oneof in AnyValue does not allow repeated fields. -message ArrayValue { - // Array of values. The array may be empty (contain 0 elements). - repeated AnyValue values = 1; -} - -// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message -// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need -// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to -// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches -// are semantically equivalent. -message KeyValueList { - // A collection of key/value pairs of key-value pairs. The list may be empty (may - // contain 0 elements). - // The keys MUST be unique (it is not allowed to have more than one - // value with the same key). - repeated KeyValue values = 1; -} - -// KeyValue is a key-value pair that is used to store Span attributes, Link -// attributes, etc. -message KeyValue { - string key = 1; - AnyValue value = 2; -} - -// InstrumentationScope is a message representing the instrumentation scope information -// such as the fully qualified name and version. -message InstrumentationScope { - // An empty instrumentation scope name means the name is unknown. - string name = 1; - string version = 2; - - // Additional attributes that describe the scope. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated KeyValue attributes = 3; - uint32 dropped_attributes_count = 4; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/logs/v1/logs.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/logs/v1/logs.proto deleted file mode 100644 index f9b97dd7451..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/logs/v1/logs.proto +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.logs.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Logs.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.logs.v1"; -option java_outer_classname = "LogsProto"; -option go_package = "go.opentelemetry.io/proto/otlp/logs/v1"; - -// LogsData represents the logs data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP logs data but do not -// implement the OTLP protocol. 
-// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message LogsData { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceLogs resource_logs = 1; -} - -// A collection of ScopeLogs from a Resource. -message ResourceLogs { - reserved 1000; - - // The resource for the logs in this message. - // If this field is not set then resource info is unknown. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeLogs that originate from a resource. - repeated ScopeLogs scope_logs = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_logs" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Logs produced by a Scope. -message ScopeLogs { - // The instrumentation scope information for the logs in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of log records. - repeated LogRecord log_records = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the log data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all logs in the "logs" field. - string schema_url = 3; -} - -// Possible values for LogRecord.SeverityNumber. -enum SeverityNumber { - // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. - SEVERITY_NUMBER_UNSPECIFIED = 0; - SEVERITY_NUMBER_TRACE = 1; - SEVERITY_NUMBER_TRACE2 = 2; - SEVERITY_NUMBER_TRACE3 = 3; - SEVERITY_NUMBER_TRACE4 = 4; - SEVERITY_NUMBER_DEBUG = 5; - SEVERITY_NUMBER_DEBUG2 = 6; - SEVERITY_NUMBER_DEBUG3 = 7; - SEVERITY_NUMBER_DEBUG4 = 8; - SEVERITY_NUMBER_INFO = 9; - SEVERITY_NUMBER_INFO2 = 10; - SEVERITY_NUMBER_INFO3 = 11; - SEVERITY_NUMBER_INFO4 = 12; - SEVERITY_NUMBER_WARN = 13; - SEVERITY_NUMBER_WARN2 = 14; - SEVERITY_NUMBER_WARN3 = 15; - SEVERITY_NUMBER_WARN4 = 16; - SEVERITY_NUMBER_ERROR = 17; - SEVERITY_NUMBER_ERROR2 = 18; - SEVERITY_NUMBER_ERROR3 = 19; - SEVERITY_NUMBER_ERROR4 = 20; - SEVERITY_NUMBER_FATAL = 21; - SEVERITY_NUMBER_FATAL2 = 22; - SEVERITY_NUMBER_FATAL3 = 23; - SEVERITY_NUMBER_FATAL4 = 24; -} - -// LogRecordFlags represents constants used to interpret the -// LogRecord.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. To extract the bit-field, for example, use an -// expression like: -// -// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) -// -enum LogRecordFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. 
- LOG_RECORD_FLAGS_DO_NOT_USE = 0; - - // Bits 0-7 are used for trace flags. - LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; - - // Bits 8-31 are reserved for future use. -} - -// A log record according to OpenTelemetry Log Data Model: -// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md -message LogRecord { - reserved 4; - - // time_unix_nano is the time when the event occurred. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - fixed64 time_unix_nano = 1; - - // Time when the event was observed by the collection system. - // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) - // this timestamp is typically set at the generation time and is equal to Timestamp. - // For events originating externally and collected by OpenTelemetry (e.g. using - // Collector) this is the time when OpenTelemetry's code observed the event measured - // by the clock of the OpenTelemetry code. This field MUST be set once the event is - // observed by OpenTelemetry. - // - // For converting OpenTelemetry log data to formats that support only one timestamp or - // when receiving OpenTelemetry log data by recipients that support only one timestamp - // internally the following logic is recommended: - // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - fixed64 observed_time_unix_nano = 11; - - // Numerical value of the severity, normalized to values described in Log Data Model. - // [Optional]. - SeverityNumber severity_number = 2; - - // The severity text (also known as log level). The original string representation as - // it is known at the source. [Optional]. - string severity_text = 3; - - // A value containing the body of the log record. Can be for example a human-readable - // string message (including multi-line) describing the event in a free form or it can - // be a structured data composed of arrays and maps of other values. [Optional]. - opentelemetry.proto.common.v1.AnyValue body = 5; - - // Additional attributes that describe the specific event occurrence. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 6; - uint32 dropped_attributes_count = 7; - - // Flags, a bit field. 8 least significant bits are the trace flags as - // defined in W3C Trace Context specification. 24 most significant bits are reserved - // and must be set to 0. Readers must not assume that 24 most significant bits - // will be zero and must correctly mask the bits when reading 8-bit trace flag (use - // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. - fixed32 flags = 8; - - // A unique identifier for a trace. All logs from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. - // - // The receivers SHOULD assume that the log record is not associated with a - // trace if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. 
- bytes trace_id = 9; - - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. If the sender specifies a valid span_id then it SHOULD also - // specify a valid trace_id. - // - // The receivers SHOULD assume that the log record is not associated with a - // span if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. - bytes span_id = 10; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/metrics/v1/metrics.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/metrics/v1/metrics.proto deleted file mode 100644 index 19bb7ff8d53..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/metrics/v1/metrics.proto +++ /dev/null @@ -1,691 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.metrics.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.metrics.v1"; -option java_outer_classname = "MetricsProto"; -option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1"; - -// MetricsData represents the metrics data that can be stored in a persistent -// storage, OR can be embedded by other protocols that transfer OTLP metrics -// data but do not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message MetricsData { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceMetrics resource_metrics = 1; -} - -// A collection of ScopeMetrics from a Resource. -message ResourceMetrics { - reserved 1000; - - // The resource for the metrics in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of metrics that originate from a resource. - repeated ScopeMetrics scope_metrics = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. 
To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_metrics" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Metrics produced by an Scope. -message ScopeMetrics { - // The instrumentation scope information for the metrics in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of metrics that originate from an instrumentation library. - repeated Metric metrics = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all metrics in the "metrics" field. - string schema_url = 3; -} - -// Defines a Metric which has one or more timeseries. The following is a -// brief summary of the Metric data model. For more details, see: -// -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md -// -// -// The data model and relation between entities is shown in the -// diagram below. Here, "DataPoint" is the term used to refer to any -// one of the specific data point value types, and "points" is the term used -// to refer to any one of the lists of points contained in the Metric. -// -// - Metric is composed of a metadata and data. -// - Metadata part contains a name, description, unit. -// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). -// - DataPoint contains timestamps, attributes, and one of the possible value type -// fields. -// -// Metric -// +------------+ -// |name | -// |description | -// |unit | +------------------------------------+ -// |data |---> |Gauge, Sum, Histogram, Summary, ... | -// +------------+ +------------------------------------+ -// -// Data [One of Gauge, Sum, Histogram, Summary, ...] -// +-----------+ -// |... | // Metadata about the Data. -// |points |--+ -// +-----------+ | -// | +---------------------------+ -// | |DataPoint 1 | -// v |+------+------+ +------+ | -// +-----+ ||label |label |...|label | | -// | 1 |-->||value1|value2|...|valueN| | -// +-----+ |+------+------+ +------+ | -// | . | |+-----+ | -// | . | ||value| | -// | . | |+-----+ | -// | . | +---------------------------+ -// | . | . -// | . | . -// | . | . -// | . | +---------------------------+ -// | . | |DataPoint M | -// +-----+ |+------+------+ +------+ | -// | M |-->||label |label |...|label | | -// +-----+ ||value1|value2|...|valueN| | -// |+------+------+ +------+ | -// |+-----+ | -// ||value| | -// |+-----+ | -// +---------------------------+ -// -// Each distinct type of DataPoint represents the output of a specific -// aggregation function, the result of applying the DataPoint's -// associated function of to one or more measurements. -// -// All DataPoint types have three common fields: -// - Attributes includes key-value pairs associated with the data point -// - TimeUnixNano is required, set to the end time of the aggregation -// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints -// having an AggregationTemporality field, as discussed below. 
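As an aside, both timestamp fields on a data point are plain fixed64 unix-nanosecond counts; a minimal C# sketch (hypothetical helper, relying on .NET's 100 ns ticks) of producing such values from a DateTimeOffset:

    using System;

    static class UnixNanos
    {
        // TimeUnixNano / StartTimeUnixNano are nanoseconds since the Unix epoch.
        // .NET ticks are 100 ns, so take ticks-since-epoch and scale by 100.
        public static ulong FromDateTimeOffset(DateTimeOffset value) =>
            (ulong)(value.UtcTicks - DateTimeOffset.UnixEpoch.UtcTicks) * 100;
    }

    // Usage: var endTimeUnixNano = UnixNanos.FromDateTimeOffset(DateTimeOffset.UtcNow);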
-// -// Both TimeUnixNano and StartTimeUnixNano values are expressed as -// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. -// -// # TimeUnixNano -// -// This field is required, having consistent interpretation across -// DataPoint types. TimeUnixNano is the moment corresponding to when -// the data point's aggregate value was captured. -// -// Data points with the 0 value for TimeUnixNano SHOULD be rejected -// by consumers. -// -// # StartTimeUnixNano -// -// StartTimeUnixNano in general allows detecting when a sequence of -// observations is unbroken. This field indicates to consumers the -// start time for points with cumulative and delta -// AggregationTemporality, and it should be included whenever possible -// to support correct rate calculation. Although it may be omitted -// when the start time is truly unknown, setting StartTimeUnixNano is -// strongly encouraged. -message Metric { - reserved 4, 6, 8; - - // name of the metric. - string name = 1; - - // description of the metric, which can be used in documentation. - string description = 2; - - // unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. - string unit = 3; - - // Data determines the aggregation type (if any) of the metric, what is the - // reported value type for the data points, as well as the relatationship to - // the time interval over which they are reported. - oneof data { - Gauge gauge = 5; - Sum sum = 7; - Histogram histogram = 9; - ExponentialHistogram exponential_histogram = 10; - Summary summary = 11; - } - - // Additional metadata attributes that describe the metric. [Optional]. - // Attributes are non-identifying. - // Consumers SHOULD NOT need to be aware of these attributes. - // These attributes MAY be used to encode information allowing - // for lossless roundtrip translation to / from another data model. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue metadata = 12; -} - -// Gauge represents the type of a scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. -message Gauge { - repeated NumberDataPoint data_points = 1; -} - -// Sum represents the type of a scalar metric that is calculated as a sum of all -// reported measurements over a time interval. -message Sum { - repeated NumberDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; - - // If "true" means that the sum is monotonic. - bool is_monotonic = 3; -} - -// Histogram represents the type of a metric that is calculated by aggregating -// as a Histogram of all reported measurements over a time interval. -message Histogram { - repeated HistogramDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. 
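To make the delta-versus-cumulative distinction concrete, a minimal C# sketch (hypothetical class, not part of this exporter) of the same request counter reported under either temporality:

    class RequestCounterReporter
    {
        private long total;          // running total since the fixed start time
        private long lastReported;   // total at the previous report

        public void Record(long requests) => total += requests;

        // DELTA: only the change observed since the previous report.
        public long NextDeltaPoint()
        {
            long delta = total - lastReported;
            lastReported = total;
            return delta;
        }

        // CUMULATIVE: the running total since the fixed start time; the start time
        // only changes if state is lost and the series is reset.
        public long NextCumulativePoint() => total;
    }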
- AggregationTemporality aggregation_temporality = 2; -} - -// ExponentialHistogram represents the type of a metric that is calculated by aggregating -// as a ExponentialHistogram of all reported double measurements over a time interval. -message ExponentialHistogram { - repeated ExponentialHistogramDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; -} - -// Summary metric data are used to convey quantile summaries, -// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) -// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) -// data type. These data points cannot always be merged in a meaningful way. -// While they can be useful in some applications, histogram data points are -// recommended for new applications. -message Summary { - repeated SummaryDataPoint data_points = 1; -} - -// AggregationTemporality defines how a metric aggregator reports aggregated -// values. It describes how those values relate to the time interval over -// which they are aggregated. -enum AggregationTemporality { - // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; - - // DELTA is an AggregationTemporality for a metric aggregator which reports - // changes since last report time. Successive metrics contain aggregation of - // values from continuous and non-overlapping intervals. - // - // The values for a DELTA metric are based only on the time interval - // associated with one measurement cycle. There is no dependency on - // previous measurements like is the case for CUMULATIVE metrics. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // DELTA metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0+1 to - // t_0+2 with a value of 2. - AGGREGATION_TEMPORALITY_DELTA = 1; - - // CUMULATIVE is an AggregationTemporality for a metric aggregator which - // reports changes since a fixed start time. This means that current values - // of a CUMULATIVE metric depend on all previous measurements since the - // start time. Because of this, the sender is required to retain this state - // in some form. If this state is lost or invalidated, the CUMULATIVE metric - // values MUST be reset and a new fixed start time following the last - // reported measurement time sent MUST be used. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // CUMULATIVE metric: - // - // 1. The system starts receiving at time=t_0. - // 2. 
A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+2 with a value of 5. - // 9. The system experiences a fault and loses state. - // 10. The system recovers and resumes receiving at time=t_1. - // 11. A request is received, the system measures 1 request. - // 12. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_1 to - // t_0+1 with a value of 1. - // - // Note: Even though, when reporting changes since last report time, using - // CUMULATIVE is valid, it is not recommended. This may cause problems for - // systems that do not use start_time to determine when the aggregation - // value was reset (e.g. Prometheus). - AGGREGATION_TEMPORALITY_CUMULATIVE = 2; -} - -// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a -// bit-field representing 32 distinct boolean flags. Each flag defined in this -// enum is a bit-mask. To test the presence of a single flag in the flags of -// a data point, for example, use an expression like: -// -// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK -// -enum DataPointFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - DATA_POINT_FLAGS_DO_NOT_USE = 0; - - // This DataPoint is valid but has no recorded value. This value - // SHOULD be used to reflect explicitly missing data in a series, as - // for an equivalent to the Prometheus "staleness marker". - DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1; - - // Bits 2-31 are reserved for future use. -} - -// NumberDataPoint is a single data point in a timeseries that describes the -// time-varying scalar value of a metric. -message NumberDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // The value itself. A point is considered invalid when one of the recognized - // value fields is not present inside this oneof. - oneof value { - double as_double = 4; - sfixed64 as_int = 6; - } - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 5; - - // Flags that apply to this specific data point. 
See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 8; -} - -// HistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Histogram. A Histogram contains summary statistics -// for a population of values, it may optionally contain the distribution of -// those values across a set of buckets. -// -// If the histogram contains the distribution of values, then both -// "explicit_bounds" and "bucket counts" fields must be defined. -// If the histogram does not contain the distribution of values, then both -// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and -// "sum" are known. -message HistogramDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be non-negative. This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram - optional double sum = 5; - - // bucket_counts is an optional field contains the count values of histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in bucket_counts array must be by one greater than - // the number of elements in explicit_bounds array. - repeated fixed64 bucket_counts = 6; - - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // - // The boundaries for bucket at index i are: - // - // (-infinity, explicit_bounds[i]] for i == 0 - // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) - // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) - // - // The values in the explicit_bounds array must be strictly increasing. - // - // Histogram buckets are inclusive of their upper boundary, except the last - // bucket where the boundary is at infinity. This format is intentionally - // compatible with the OpenMetrics histogram definition. - repeated double explicit_bounds = 7; - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 8; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. 
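The explicit_bounds boundary rules above translate directly into a bucket lookup; a minimal C# sketch (hypothetical helper):

    static class HistogramBuckets
    {
        // Buckets are inclusive of their upper boundary:
        //   (-inf, explicit_bounds[0]], (explicit_bounds[i-1], explicit_bounds[i]], ...
        public static int BucketIndex(double value, double[] explicitBounds)
        {
            for (int i = 0; i < explicitBounds.Length; i++)
            {
                if (value <= explicitBounds[i])
                {
                    return i;
                }
            }

            // Final overflow bucket: (explicit_bounds[last], +infinity).
            return explicitBounds.Length;
        }
    }

    // Note bucket_counts has exactly explicit_bounds.Length + 1 entries, matching
    // the indices this lookup can return.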
- uint32 flags = 10; - - // min is the minimum value over (start_time, end_time]. - optional double min = 11; - - // max is the maximum value over (start_time, end_time]. - optional double max = 12; -} - -// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains -// summary statistics for a population of values, it may optionally contain the -// distribution of those values across a set of buckets. -// -message ExponentialHistogramDataPoint { - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be - // non-negative. This value must be equal to the sum of the "bucket_counts" - // values in the positive and negative Buckets plus the "zero_count" field. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram - optional double sum = 5; - - // scale describes the resolution of the histogram. Boundaries are - // located at powers of the base, where: - // - // base = (2^(2^-scale)) - // - // The histogram bucket identified by `index`, a signed integer, - // contains values that are greater than (base^index) and - // less than or equal to (base^(index+1)). - // - // The positive and negative ranges of the histogram are expressed - // separately. Negative values are mapped by their absolute value - // into the negative range using the same scale as the positive range. - // - // scale is not restricted by the protocol, as the permissible - // values depend on the range of the data. - sint32 scale = 6; - - // zero_count is the count of values that are either exactly zero or - // within the region considered zero by the instrumentation at the - // tolerated degree of precision. This bucket stores values that - // cannot be expressed using the standard exponential formula as - // well as values that have been rounded to zero. - // - // Implementations MAY consider the zero bucket to have probability - // mass equal to (zero_count / count). - fixed64 zero_count = 7; - - // positive carries the positive range of exponential bucket counts. - Buckets positive = 8; - - // negative carries the negative range of exponential bucket counts. 
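A minimal C# sketch (hypothetical helper, ignoring the zero bucket and exact-power floating-point edge cases) of the base and bucket-index math implied by `scale`:

    using System;

    static class ExponentialBuckets
    {
        // base = 2^(2^-scale)
        public static double Base(int scale) => Math.Pow(2.0, Math.Pow(2.0, -scale));

        // Find index such that base^index < value <= base^(index + 1), i.e.
        // index = ceil(log_base(value)) - 1, for a positive value.
        public static int Index(double value, int scale)
        {
            double lnBase = Math.Pow(2.0, -scale) * Math.Log(2.0);
            return (int)Math.Ceiling(Math.Log(value) / lnBase) - 1;
        }
    }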
- Buckets negative = 9; - - // Buckets are a set of bucket counts, encoded in a contiguous array - // of counts. - message Buckets { - // Offset is the bucket index of the first entry in the bucket_counts array. - // - // Note: This uses a varint encoding as a simple form of compression. - sint32 offset = 1; - - // bucket_counts is an array of count values, where bucket_counts[i] carries - // the count of the bucket at index (offset+i). bucket_counts[i] is the count - // of values greater than base^(offset+i) and less than or equal to - // base^(offset+i+1). - // - // Note: By contrast, the explicit HistogramDataPoint uses - // fixed64. This field is expected to have many buckets, - // especially zeros, so uint64 has been selected to ensure - // varint encoding. - repeated uint64 bucket_counts = 2; - } - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 10; - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 11; - - // min is the minimum value over (start_time, end_time]. - optional double min = 12; - - // max is the maximum value over (start_time, end_time]. - optional double max = 13; - - // ZeroThreshold may be optionally set to convey the width of the zero - // region. Where the zero region is defined as the closed interval - // [-ZeroThreshold, ZeroThreshold]. - // When ZeroThreshold is 0, zero count bucket stores values that cannot be - // expressed using the standard exponential formula as well as values that - // have been rounded to zero. - double zero_threshold = 14; -} - -// SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. -message SummaryDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be non-negative. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary - double sum = 5; - - // Represents the value at a given quantile of a distribution. - // - // To record Min and Max values following conventions are used: - // - The 1.0 quantile is equivalent to the maximum value observed. - // - The 0.0 quantile is equivalent to the minimum value observed. 
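A minimal C# sketch (hypothetical trimmed stand-in types) of recovering min and max from quantile_values using the 0.0 / 1.0 convention above:

    using System.Collections.Generic;
    using System.Linq;

    // Hypothetical trimmed stand-in for SummaryDataPoint.ValueAtQuantile.
    record ValueAtQuantile(double Quantile, double Value);

    static class SummaryConventions
    {
        // The 0.0 quantile carries the observed minimum, the 1.0 quantile the maximum;
        // null means the corresponding quantile was not reported.
        public static (double? Min, double? Max) MinMax(IReadOnlyList<ValueAtQuantile> quantiles) =>
            (quantiles.FirstOrDefault(q => q.Quantile == 0.0)?.Value,
             quantiles.FirstOrDefault(q => q.Quantile == 1.0)?.Value);
    }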
- // - // See the following issue for more context: - // https://github.com/open-telemetry/opentelemetry-proto/issues/125 - message ValueAtQuantile { - // The quantile of a distribution. Must be in the interval - // [0.0, 1.0]. - double quantile = 1; - - // The value at the given quantile of a distribution. - // - // Quantile values must NOT be negative. - double value = 2; - } - - // (Optional) list of values at different quantiles of the distribution calculated - // from the current snapshot. The quantiles must be strictly increasing. - repeated ValueAtQuantile quantile_values = 6; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 8; -} - -// A representation of an exemplar, which is a sample input measurement. -// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -message Exemplar { - reserved 1; - - // The set of key/value pairs that were filtered out by the aggregator, but - // recorded alongside the original measurement. Only key/value pairs that were - // filtered out by the aggregator should be included - repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7; - - // time_unix_nano is the exact time when this exemplar was recorded - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 2; - - // The value of the measurement that was recorded. An exemplar is - // considered invalid when one of the recognized value fields is not present - // inside this oneof. - oneof value { - double as_double = 3; - sfixed64 as_int = 6; - } - - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - bytes span_id = 4; - - // (Optional) Trace ID of the exemplar trace. - // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - bytes trace_id = 5; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto deleted file mode 100644 index b5b5b88fcef..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// This file includes work covered by the following copyright and permission notices: -// -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Profile is a common stacktrace profile format. -// -// Measurements represented with this format should follow the -// following conventions: -// -// - Consumers should treat unset optional fields as if they had been -// set with their default value. -// -// - When possible, measurements should be stored in "unsampled" form -// that is most useful to humans. There should be enough -// information present to determine the original sampled values. -// -// - On-disk, the serialized proto must be gzip-compressed. -// -// - The profile is represented as a set of samples, where each sample -// references a sequence of locations, and where each location belongs -// to a mapping. -// - There is a N->1 relationship from sample.location_id entries to -// locations. For every sample.location_id entry there must be a -// unique Location with that index. -// - There is an optional N->1 relationship from locations to -// mappings. For every nonzero Location.mapping_id there must be a -// unique Mapping with that index. - -syntax = "proto3"; - -package opentelemetry.proto.profiles.v1experimental; - -import "opentelemetry/proto/common/v1/common.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.profiles.v1experimental"; -option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; - -// Represents a complete profile, including sample types, samples, -// mappings to binaries, locations, functions, string table, and additional metadata. -message Profile { - // A description of the samples associated with each Sample.value. - // For a cpu profile this might be: - // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] - // For a heap profile, this might be: - // [["allocations","count"], ["space","bytes"]], - // If one of the values represents the number of events represented - // by the sample, by convention it should be at index 0 and use - // sample_type.unit == "count". - repeated ValueType sample_type = 1; - // The set of samples recorded in this profile. - repeated Sample sample = 2; - // Mapping from address ranges to the image/binary/library mapped - // into that address range. mapping[0] will be the main binary. - repeated Mapping mapping = 3; - // Locations referenced by samples via location_indices. - repeated Location location = 4; - // Array of locations referenced by samples. - repeated int64 location_indices = 15; - // Functions referenced by locations. - repeated Function function = 5; - // Lookup table for attributes. - repeated opentelemetry.proto.common.v1.KeyValue attribute_table = 16; - // Represents a mapping between Attribute Keys and Units. - repeated AttributeUnit attribute_units = 17; - // Lookup table for links. - repeated Link link_table = 18; - // A common table for strings referenced by various messages. - // string_table[0] must always be "". 
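A minimal C# sketch (hypothetical helper) of resolving the string-table indices this Profile message uses throughout (drop_frames, keep_frames, and the name/filename fields further down):

    using System;
    using System.Collections.Generic;

    static class StringTable
    {
        // Index 0 is always the empty string, so 0 doubles as "unset".
        public static string Resolve(IReadOnlyList<string> stringTable, long index)
        {
            if (index < 0 || index >= stringTable.Count)
            {
                throw new ArgumentOutOfRangeException(nameof(index));
            }

            return stringTable[(int)index];
        }
    }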
- repeated string string_table = 6; - // frames with Function.function_name fully matching the following - // regexp will be dropped from the samples, along with their successors. - int64 drop_frames = 7; // Index into string table. - // frames with Function.function_name fully matching the following - // regexp will be kept, even if it matches drop_frames. - int64 keep_frames = 8; // Index into string table. - - // The following fields are informational, do not affect - // interpretation of results. - - // Time of collection (UTC) represented as nanoseconds past the epoch. - int64 time_nanos = 9; - // Duration of the profile, if a duration makes sense. - int64 duration_nanos = 10; - // The kind of events between sampled occurrences. - // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] - ValueType period_type = 11; - // The number of events between sampled occurrences. - int64 period = 12; - // Free-form text associated with the profile. The text is displayed as is - // to the user by the tools that read profiles (e.g. by pprof). This field - // should not be used to store any machine-readable information, it is only - // for human-friendly content. The profile must stay functional if this field - // is cleaned. - repeated int64 comment = 13; // Indices into string table. - // Index into the string table of the type of the preferred sample - // value. If unset, clients should default to the last sample value. - int64 default_sample_type = 14; -} - -// Represents a mapping between Attribute Keys and Units. -message AttributeUnit { - // Index into string table. - int64 attribute_key = 1; - // Index into string table. - int64 unit = 2; -} - -// A pointer from a profile Sample to a trace Span. -// Connects a profile sample to a trace span, identified by unique trace and span IDs. -message Link { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - bytes trace_id = 1; - - // A unique identifier for the linked span. The ID is an 8-byte array. - bytes span_id = 2; -} - -// Specifies the method of aggregating metric values, either DELTA (change since last report) -// or CUMULATIVE (total since a fixed start time). -enum AggregationTemporality { - /* UNSPECIFIED is the default AggregationTemporality, it MUST not be used. */ - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; - - /** DELTA is an AggregationTemporality for a profiler which reports - changes since last report time. Successive metrics contain aggregation of - values from continuous and non-overlapping intervals. - - The values for a DELTA metric are based only on the time interval - associated with one measurement cycle. There is no dependency on - previous measurements like is the case for CUMULATIVE metrics. - - For example, consider a system measuring the number of requests that - it receives and reports the sum of these requests every second as a - DELTA metric: - - 1. The system starts receiving at time=t_0. - 2. A request is received, the system measures 1 request. - 3. A request is received, the system measures 1 request. - 4. A request is received, the system measures 1 request. - 5. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_0 to - t_0+1 with a value of 3. - 6. A request is received, the system measures 1 request. - 7. A request is received, the system measures 1 request. - 8. The 1 second collection cycle ends. 
A metric is exported for the - number of requests received over the interval of time t_0+1 to - t_0+2 with a value of 2. */ - AGGREGATION_TEMPORALITY_DELTA = 1; - - /** CUMULATIVE is an AggregationTemporality for a profiler which - reports changes since a fixed start time. This means that current values - of a CUMULATIVE metric depend on all previous measurements since the - start time. Because of this, the sender is required to retain this state - in some form. If this state is lost or invalidated, the CUMULATIVE metric - values MUST be reset and a new fixed start time following the last - reported measurement time sent MUST be used. - - For example, consider a system measuring the number of requests that - it receives and reports the sum of these requests every second as a - CUMULATIVE metric: - - 1. The system starts receiving at time=t_0. - 2. A request is received, the system measures 1 request. - 3. A request is received, the system measures 1 request. - 4. A request is received, the system measures 1 request. - 5. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_0 to - t_0+1 with a value of 3. - 6. A request is received, the system measures 1 request. - 7. A request is received, the system measures 1 request. - 8. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_0 to - t_0+2 with a value of 5. - 9. The system experiences a fault and loses state. - 10. The system recovers and resumes receiving at time=t_1. - 11. A request is received, the system measures 1 request. - 12. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_1 to - t_1+1 with a value of 1. - - Note: Even though, when reporting changes since last report time, using - CUMULATIVE is valid, it is not recommended. */ - AGGREGATION_TEMPORALITY_CUMULATIVE = 2; -} - -// ValueType describes the type and units of a value, with an optional aggregation temporality. -message ValueType { - int64 type = 1; // Index into string table. - int64 unit = 2; // Index into string table. - - AggregationTemporality aggregation_temporality = 3; -} - -// Each Sample records values encountered in some program -// context. The program context is typically a stack trace, perhaps -// augmented with auxiliary information like the thread-id, some -// indicator of a higher level request being handled etc. -message Sample { - // The indices recorded here correspond to locations in Profile.location. - // The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length] - repeated uint64 location_index = 1; - // locations_start_index along with locations_length refers to to a slice of locations in Profile.location. - // Supersedes location_index. - uint64 locations_start_index = 7; - // locations_length along with locations_start_index refers to a slice of locations in Profile.location. - // Supersedes location_index. - uint64 locations_length = 8; - // A 128bit id that uniquely identifies this stacktrace, globally. Index into string table. [optional] - uint32 stacktrace_id_index = 9; - // The type and unit of each value is defined by the corresponding - // entry in Profile.sample_type. All samples must have the same - // number of values, the same as the length of Profile.sample_type. 
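A minimal C# sketch (hypothetical, heavily trimmed stand-ins for the generated types) of resolving the slice of Profile.location referenced by a Sample through locations_start_index and locations_length:

    using System.Collections.Generic;

    // Hypothetical trimmed stand-ins for the generated proto types.
    record Location(ulong Id);
    record Sample(ulong LocationsStartIndex, ulong LocationsLength);
    record Profile(IReadOnlyList<Location> Location);

    static class SampleLocations
    {
        // Yield the locations in the slice [locations_start_index,
        // locations_start_index + locations_length) of Profile.location.
        public static IEnumerable<Location> For(Profile profile, Sample sample)
        {
            for (ulong i = 0; i < sample.LocationsLength; i++)
            {
                yield return profile.Location[(int)(sample.LocationsStartIndex + i)];
            }
        }
    }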
- // When aggregating multiple samples into a single sample, the - // result has a list of values that is the element-wise sum of the - // lists of the originals. - repeated int64 value = 2; - // label includes additional context for this sample. It can include - // things like a thread id, allocation size, etc. - // - // NOTE: While possible, having multiple values for the same label key is - // strongly discouraged and should never be used. Most tools (e.g. pprof) do - // not have good (or any) support for multi-value labels. And an even more - // discouraged case is having a string label and a numeric label of the same - // name on a sample. Again, possible to express, but should not be used. - // [deprecated, superseded by attributes] - repeated Label label = 3; - // References to attributes in Profile.attribute_table. [optional] - repeated uint64 attributes = 10; - - // Reference to link in Profile.link_table. [optional] - uint64 link = 12; - - // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected - // to fall within the Profile's time range. [optional] - repeated uint64 timestamps_unix_nano = 13; -} - -// Provides additional context for a sample, -// such as thread ID or allocation size, with optional units. [deprecated] -message Label { - int64 key = 1; // Index into string table - - // At most one of the following must be present - int64 str = 2; // Index into string table - int64 num = 3; - - // Should only be present when num is present. - // Specifies the units of num. - // Use arbitrary string (for example, "requests") as a custom count unit. - // If no unit is specified, consumer may apply heuristic to deduce the unit. - // Consumers may also interpret units like "bytes" and "kilobytes" as memory - // units and units like "seconds" and "nanoseconds" as time units, - // and apply appropriate unit conversions to these. - int64 num_unit = 4; // Index into string table -} - -// Indicates the semantics of the build_id field. -enum BuildIdKind { - // Linker-generated build ID, stored in the ELF binary notes. - BUILD_ID_LINKER = 0; - // Build ID based on the content hash of the binary. Currently no particular - // hashing approach is standardized, so a given producer needs to define it - // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. - // We may choose to provide a standardized stable hash recommendation later. - BUILD_ID_BINARY_HASH = 1; -} - -// Describes the mapping of a binary in memory, including its address range, -// file offset, and metadata like build ID -message Mapping { - // Unique nonzero id for the mapping. [deprecated] - uint64 id = 1; - // Address at which the binary (or DLL) is loaded into memory. - uint64 memory_start = 2; - // The limit of the address range occupied by this mapping. - uint64 memory_limit = 3; - // Offset in the binary that corresponds to the first mapped address. - uint64 file_offset = 4; - // The object this entry is loaded from. This can be a filename on - // disk for the main binary and shared libraries, or virtual - // abstractions like "[vdso]". - int64 filename = 5; // Index into string table - // A string that uniquely identifies a particular program version - // with high probability. E.g., for binaries generated by GNU tools, - // it could be the contents of the .note.gnu.build-id field. - int64 build_id = 6; // Index into string table - // Specifies the kind of build id. 
See BuildIdKind enum for more details [optional] - BuildIdKind build_id_kind = 11; - // References to attributes in Profile.attribute_table. [optional] - repeated uint64 attributes = 12; - // The following fields indicate the resolution of symbolic info. - bool has_functions = 7; - bool has_filenames = 8; - bool has_line_numbers = 9; - bool has_inline_frames = 10; -} - -// Describes function and line table debug information. -message Location { - // Unique nonzero id for the location. A profile could use - // instruction addresses or any integer sequence as ids. [deprecated] - uint64 id = 1; - // The index of the corresponding profile.Mapping for this location. - // It can be unset if the mapping is unknown or not applicable for - // this profile type. - uint64 mapping_index = 2; - // The instruction address for this location, if available. It - // should be within [Mapping.memory_start...Mapping.memory_limit] - // for the corresponding mapping. A non-leaf address may be in the - // middle of a call instruction. It is up to display tools to find - // the beginning of the instruction if necessary. - uint64 address = 3; - // Multiple line indicates this location has inlined functions, - // where the last entry represents the caller into which the - // preceding entries were inlined. - // - // E.g., if memcpy() is inlined into printf: - // line[0].function_name == "memcpy" - // line[1].function_name == "printf" - repeated Line line = 4; - // Provides an indication that multiple symbols map to this location's - // address, for example due to identical code folding by the linker. In that - // case the line information above represents one of the multiple - // symbols. This field must be recomputed when the symbolization state of the - // profile changes. - bool is_folded = 5; - - // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. - uint32 type_index = 6; - - // References to attributes in Profile.attribute_table. [optional] - repeated uint64 attributes = 7; -} - -// Details a specific line in a source code, linked to a function. -message Line { - // The index of the corresponding profile.Function for this line. - uint64 function_index = 1; - // Line number in source code. - int64 line = 2; - // Column number in source code. - int64 column = 3; -} - -// Describes a function, including its human-readable name, system name, -// source file, and starting line number in the source. -message Function { - // Unique nonzero id for the function. [deprecated] - uint64 id = 1; - // Name of the function, in human-readable form if available. - int64 name = 2; // Index into string table - // Name of the function, as identified by the system. - // For instance, it can be a C++ mangled name. - int64 system_name = 3; // Index into string table - // Source file containing the function. - int64 filename = 4; // Index into string table - // Line number in source file. 
- int64 start_line = 5; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto deleted file mode 100644 index 84b0108f86a..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.profiles.v1experimental; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; -import "opentelemetry/proto/profiles/v1experimental/pprofextended.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.profiles.v1experimental"; -option java_outer_classname = "ProfilesProto"; -option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; - -// Relationships Diagram -// -// ┌──────────────────┐ LEGEND -// │ ProfilesData │ -// └──────────────────┘ ─────▶ embedded -// │ -// │ 1-n ─────▷ referenced by index -// ▼ -// ┌──────────────────┐ -// │ ResourceProfiles │ -// └──────────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ ScopeProfiles │ -// └──────────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ ProfileContainer │ -// └──────────────────┘ -// │ -// │ 1-1 -// ▼ -// ┌──────────────────┐ -// │ Profile │ -// └──────────────────┘ -// │ n-1 -// │ 1-n ┌───────────────────────────────────────┐ -// ▼ │ ▽ -// ┌──────────────────┐ 1-n ┌──────────────┐ ┌──────────┐ -// │ Sample │ ──────▷ │ KeyValue │ │ Link │ -// └──────────────────┘ └──────────────┘ └──────────┘ -// │ 1-n △ △ -// │ 1-n ┌─────────────────┘ │ 1-n -// ▽ │ │ -// ┌──────────────────┐ n-1 ┌──────────────┐ -// │ Location │ ──────▷ │ Mapping │ -// └──────────────────┘ └──────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ Line │ -// └──────────────────┘ -// │ -// │ 1-1 -// ▽ -// ┌──────────────────┐ -// │ Function │ -// └──────────────────┘ -// - -// ProfilesData represents the profiles data that can be stored in persistent storage, -// OR can be embedded by other protocols that transfer OTLP profiles data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message ProfilesData { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain - // one element. 
Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceProfiles resource_profiles = 1; -} - - -// A collection of ScopeProfiles from a Resource. -message ResourceProfiles { - reserved 1000; - - // The resource for the profiles in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeProfiles that originate from a resource. - repeated ScopeProfiles scope_profiles = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_profiles" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of ProfileContainers produced by an InstrumentationScope. -message ScopeProfiles { - // The instrumentation scope information for the profiles in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of ProfileContainers that originate from an instrumentation scope. - repeated ProfileContainer profiles = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all profiles in the "profiles" field. - string schema_url = 3; -} - -// A ProfileContainer represents a single profile. It wraps pprof profile with OpenTelemetry specific metadata. -message ProfileContainer { - // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with - // all zeroes is considered invalid. - // - // This field is required. - bytes profile_id = 1; - - // start_time_unix_nano is the start time of the profile. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 start_time_unix_nano = 2; - - // end_time_unix_nano is the end time of the profile. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 end_time_unix_nano = 3; - - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). 
- repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; - - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - uint32 dropped_attributes_count = 5; - - // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] - string original_payload_format = 6; - - // Original payload can be stored in this field. This can be useful for users who want to get the original payload. - // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. - // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. - // If the original payload is in pprof format, it SHOULD not be included in this field. - // The field is optional, however if it is present `profile` MUST be present and contain the same profiling information. - bytes original_payload = 7; - - // This is a reference to a pprof profile. Required, even when original_payload is present. - opentelemetry.proto.profiles.v1experimental.Profile profile = 8; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/resource/v1/resource.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/resource/v1/resource.proto deleted file mode 100644 index 6637560bc35..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/resource/v1/resource.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.resource.v1; - -import "opentelemetry/proto/common/v1/common.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Resource.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.resource.v1"; -option java_outer_classname = "ResourceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/resource/v1"; - -// Resource information. -message Resource { - // Set of attributes that describe the resource. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then - // no attributes were dropped. 
- uint32 dropped_attributes_count = 2; -} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/trace/v1/trace.proto b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/trace/v1/trace.proto deleted file mode 100644 index 5cb2f3ce1cd..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/opentelemetry/proto/trace/v1/trace.proto +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.trace.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Trace.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.trace.v1"; -option java_outer_classname = "TraceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/trace/v1"; - -// TracesData represents the traces data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP traces data but do -// not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message TracesData { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceSpans resource_spans = 1; -} - -// A collection of ScopeSpans from a Resource. -message ResourceSpans { - reserved 1000; - - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeSpans that originate from a resource. - repeated ScopeSpans scope_spans = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_spans" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Spans produced by an InstrumentationScope. -message ScopeSpans { - // The instrumentation scope information for the spans in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). 
- opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of Spans that originate from an instrumentation scope. - repeated Span spans = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. - string schema_url = 3; -} - -// A Span represents a single operation performed by a single component of the system. -// -// The next available field id is 17. -message Span { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - bytes trace_id = 1; - - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - bytes span_id = 2; - - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - string trace_state = 3; - - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - bytes parent_span_id = 4; - - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether a span's parent - // is remote. The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // When creating span messages, if the message is logically forwarded from another source - // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - // be set to zero. - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // - // [Optional]. - fixed32 flags = 16; - - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // Empty value is equivalent to an unknown span name. - // - // This field is required. - string name = 5; - - // SpanKind is the type of span. 
Can be used to specify additional relationships between spans - // in addition to a parent/child relationship. - enum SpanKind { - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. - SPAN_KIND_UNSPECIFIED = 0; - - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. - SPAN_KIND_INTERNAL = 1; - - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - SPAN_KIND_SERVER = 2; - - // Indicates that the span describes a request to some remote service. - SPAN_KIND_CLIENT = 3; - - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. - SPAN_KIND_PRODUCER = 4; - - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. - SPAN_KIND_CONSUMER = 5; - } - - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - SpanKind kind = 6; - - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 start_time_unix_nano = 7; - - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 end_time_unix_nano = 8; - - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "example.com/myattribute": true - // "example.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; - - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. 
If this value is 0, then no attributes were dropped. - uint32 dropped_attributes_count = 10; - - // Event is a time-stamped annotation of the span, consisting of user-supplied - // text description and key-value pairs. - message Event { - // time_unix_nano is the time the event occurred. - fixed64 time_unix_nano = 1; - - // name of the event. - // This field is semantically required to be set to non-empty string. - string name = 2; - - // attributes is a collection of attribute key/value pairs on the event. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 3; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - uint32 dropped_attributes_count = 4; - } - - // events is a collection of Event items. - repeated Event events = 11; - - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. - uint32 dropped_events_count = 12; - - // A pointer from the current span to another span in the same trace or in a - // different trace. For example, this can be used in batching operations, - // where a single batch handler processes multiple requests from different - // traces or when the handler receives a request from a different project. - message Link { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - bytes trace_id = 1; - - // A unique identifier for the linked span. The ID is an 8-byte array. - bytes span_id = 2; - - // The trace_state associated with the link. - string trace_state = 3; - - // attributes is a collection of attribute key/value pairs on the link. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - uint32 dropped_attributes_count = 5; - - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether the link is remote. - // The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - // - // [Optional]. - fixed32 flags = 6; - } - - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. - repeated Link links = 13; - - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - uint32 dropped_links_count = 14; - - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). 
- Status status = 15; -} - -// The Status type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -message Status { - reserved 1; - - // A developer-facing human readable error message. - string message = 2; - - // For the semantics of status codes see - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status - enum StatusCode { - // The default status. - STATUS_CODE_UNSET = 0; - // The Span has been validated by an Application developer or Operator to - // have completed successfully. - STATUS_CODE_OK = 1; - // The Span contains an error. - STATUS_CODE_ERROR = 2; - }; - - // The status code. - StatusCode code = 3; -} - -// SpanFlags represents constants used to interpret the -// Span.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. To extract the bit-field, for example, use an -// expression like: -// -// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) -// -// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. -// -// Note that Span flags were introduced in version 1.1 of the -// OpenTelemetry protocol. Older Span producers do not set this -// field, consequently consumers should not rely on the absence of a -// particular flag bit to indicate the presence of a particular feature. -enum SpanFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - SPAN_FLAGS_DO_NOT_USE = 0; - - // Bits 0-7 are used for trace flags. - SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; - - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. - SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100; - SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200; - - // Bits 10-31 are reserved for future use. 
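A worked example can make the mask arithmetic above concrete. The C# sketch below is illustrative only and is not part of the deleted proto file; it assumes the three mask values shown above and decodes a hypothetical flags value of 0x00000301.

using System;

internal static class SpanFlagsExample
{
    // Mask values as defined in the SpanFlags enum above (assumed, not generated code).
    private const uint TraceFlagsMask = 0x000000FF;   // SPAN_FLAGS_TRACE_FLAGS_MASK
    private const uint HasIsRemoteMask = 0x00000100;  // SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
    private const uint IsRemoteMask = 0x00000200;     // SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK

    public static void Main()
    {
        uint flags = 0x00000301; // hypothetical Span.flags value

        byte w3cTraceFlags = (byte)(flags & TraceFlagsMask);                   // 0x01: W3C "sampled" flag
        bool remotenessKnown = (flags & HasIsRemoteMask) != 0;                 // true: bit 8 is set
        bool parentIsRemote = remotenessKnown && (flags & IsRemoteMask) != 0;  // true: bit 9 is set

        Console.WriteLine($"traceFlags=0x{w3cTraceFlags:X2} known={remotenessKnown} remote={parentIsRemote}");
    }
}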
-} diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OpenTelemetry.Exporter.OpenTelemetryProtocol.csproj b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OpenTelemetry.Exporter.OpenTelemetryProtocol.csproj index 6c122ffa6de..e3aa4fcb8a8 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OpenTelemetry.Exporter.OpenTelemetryProtocol.csproj +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OpenTelemetry.Exporter.OpenTelemetryProtocol.csproj @@ -23,13 +23,6 @@ - - - - - - - @@ -41,10 +34,4 @@ - - - Implementation - - - diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpExporterOptionsExtensions.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpExporterOptionsExtensions.cs index 08728636824..fabb5a23392 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpExporterOptionsExtensions.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpExporterOptionsExtensions.cs @@ -4,17 +4,11 @@ #if NETFRAMEWORK using System.Net.Http; #endif +using System.Diagnostics; using System.Reflection; -using Grpc.Core; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; -#if NETSTANDARD2_1 || NET -using Grpc.Net.Client; -#endif -using System.Diagnostics; -using Google.Protobuf; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; -using MetricsOtlpCollector = OpenTelemetry.Proto.Collector.Metrics.V1; namespace OpenTelemetry.Exporter; @@ -28,39 +22,6 @@ internal static class OtlpExporterOptionsExtensions private const string MetricsHttpServicePath = "v1/metrics"; private const string LogsHttpServicePath = "v1/logs"; -#if NETSTANDARD2_1 || NET - public static GrpcChannel CreateChannel(this OtlpExporterOptions options) -#else - public static Channel CreateChannel(this OtlpExporterOptions options) -#endif - { - if (options.Endpoint.Scheme != Uri.UriSchemeHttp && options.Endpoint.Scheme != Uri.UriSchemeHttps) - { - throw new NotSupportedException($"Endpoint URI scheme ({options.Endpoint.Scheme}) is not supported. Currently only \"http\" and \"https\" are supported."); - } - -#if NETSTANDARD2_1 || NET - return GrpcChannel.ForAddress(options.Endpoint); -#else - ChannelCredentials channelCredentials; - if (options.Endpoint.Scheme == Uri.UriSchemeHttps) - { - channelCredentials = new SslCredentials(); - } - else - { - channelCredentials = ChannelCredentials.Insecure; - } - - return new Channel(options.Endpoint.Authority, channelCredentials); -#endif - } - - public static Metadata GetMetadataFromHeaders(this OtlpExporterOptions options) - { - return options.GetHeaders((m, k, v) => m.Add(k, v)); - } - public static THeaders GetHeaders(this OtlpExporterOptions options, Action addHeader) where THeaders : new() { @@ -97,37 +58,37 @@ public static THeaders GetHeaders(this OtlpExporterOptions options, Ac return headers; } - public static ProtobufOtlpExporterTransmissionHandler GetProtobufExportTransmissionHandler(this OtlpExporterOptions options, ExperimentalOptions experimentalOptions, OtlpSignalType otlpSignalType) + public static OtlpExporterTransmissionHandler GetProtobufExportTransmissionHandler(this OtlpExporterOptions options, ExperimentalOptions experimentalOptions, OtlpSignalType otlpSignalType) { var exportClient = GetProtobufExportClient(options, otlpSignalType); // `HttpClient.Timeout.TotalMilliseconds` would be populated with the correct timeout value for both the exporter configuration cases: // 1. 
User provides their own HttpClient. This case is straightforward as the user wants to use their `HttpClient` and thereby the same client's timeout value. // 2. If the user configures timeout via the exporter options, then the timeout set for the `HttpClient` initialized by the exporter will be set to user provided value. - double timeoutMilliseconds = exportClient is ProtobufOtlpHttpExportClient httpTraceExportClient + double timeoutMilliseconds = exportClient is OtlpHttpExportClient httpTraceExportClient ? httpTraceExportClient.HttpClient.Timeout.TotalMilliseconds : options.TimeoutMilliseconds; if (experimentalOptions.EnableInMemoryRetry) { - return new ProtobufOtlpExporterRetryTransmissionHandler(exportClient, timeoutMilliseconds); + return new OtlpExporterRetryTransmissionHandler(exportClient, timeoutMilliseconds); } else if (experimentalOptions.EnableDiskRetry) { Debug.Assert(!string.IsNullOrEmpty(experimentalOptions.DiskRetryDirectoryPath), $"{nameof(experimentalOptions.DiskRetryDirectoryPath)} is null or empty"); - return new ProtobufOtlpExporterPersistentStorageTransmissionHandler( + return new OtlpExporterPersistentStorageTransmissionHandler( exportClient, timeoutMilliseconds, Path.Combine(experimentalOptions.DiskRetryDirectoryPath, "traces")); } else { - return new ProtobufOtlpExporterTransmissionHandler(exportClient, timeoutMilliseconds); + return new OtlpExporterTransmissionHandler(exportClient, timeoutMilliseconds); } } - public static IProtobufExportClient GetProtobufExportClient(this OtlpExporterOptions options, OtlpSignalType otlpSignalType) + public static IExportClient GetProtobufExportClient(this OtlpExporterOptions options, OtlpSignalType otlpSignalType) { var httpClient = options.HttpClientFactory?.Invoke() ?? throw new InvalidOperationException("OtlpExporterOptions was missing HttpClientFactory or it returned null."); @@ -139,67 +100,21 @@ public static IProtobufExportClient GetProtobufExportClient(this OtlpExporterOpt return otlpSignalType switch { OtlpSignalType.Traces => options.Protocol == OtlpExportProtocol.Grpc - ? new ProtobufOtlpGrpcExportClient(options, httpClient, TraceGrpcServicePath) - : new ProtobufOtlpHttpExportClient(options, httpClient, TraceHttpServicePath), + ? new OtlpGrpcExportClient(options, httpClient, TraceGrpcServicePath) + : new OtlpHttpExportClient(options, httpClient, TraceHttpServicePath), OtlpSignalType.Metrics => options.Protocol == OtlpExportProtocol.Grpc - ? new ProtobufOtlpGrpcExportClient(options, httpClient, MetricsGrpcServicePath) - : new ProtobufOtlpHttpExportClient(options, httpClient, MetricsHttpServicePath), + ? new OtlpGrpcExportClient(options, httpClient, MetricsGrpcServicePath) + : new OtlpHttpExportClient(options, httpClient, MetricsHttpServicePath), OtlpSignalType.Logs => options.Protocol == OtlpExportProtocol.Grpc - ? new ProtobufOtlpGrpcExportClient(options, httpClient, LogsGrpcServicePath) - : new ProtobufOtlpHttpExportClient(options, httpClient, LogsHttpServicePath), + ? 
new OtlpGrpcExportClient(options, httpClient, LogsGrpcServicePath) + : new OtlpHttpExportClient(options, httpClient, LogsHttpServicePath), _ => throw new NotSupportedException($"OtlpSignalType {otlpSignalType} is not supported."), }; } - public static OtlpExporterTransmissionHandler GetMetricsExportTransmissionHandler(this OtlpExporterOptions options, ExperimentalOptions experimentalOptions) - { - var exportClient = GetMetricsExportClient(options); - - // `HttpClient.Timeout.TotalMilliseconds` would be populated with the correct timeout value for both the exporter configuration cases: - // 1. User provides their own HttpClient. This case is straightforward as the user wants to use their `HttpClient` and thereby the same client's timeout value. - // 2. If the user configures timeout via the exporter options, then the timeout set for the `HttpClient` initialized by the exporter will be set to user provided value. - double timeoutMilliseconds = exportClient is OtlpHttpMetricsExportClient httpMetricsExportClient - ? httpMetricsExportClient.HttpClient.Timeout.TotalMilliseconds - : options.TimeoutMilliseconds; - - if (experimentalOptions.EnableInMemoryRetry) - { - return new OtlpExporterRetryTransmissionHandler(exportClient, timeoutMilliseconds); - } - else if (experimentalOptions.EnableDiskRetry) - { - Debug.Assert(!string.IsNullOrEmpty(experimentalOptions.DiskRetryDirectoryPath), $"{nameof(experimentalOptions.DiskRetryDirectoryPath)} is null or empty"); - - return new OtlpExporterPersistentStorageTransmissionHandler( - exportClient, - timeoutMilliseconds, - (byte[] data) => - { - var request = new MetricsOtlpCollector.ExportMetricsServiceRequest(); - request.MergeFrom(data); - return request; - }, - Path.Combine(experimentalOptions.DiskRetryDirectoryPath, "metrics")); - } - else - { - return new OtlpExporterTransmissionHandler(exportClient, timeoutMilliseconds); - } - } - - public static IExportClient GetMetricsExportClient(this OtlpExporterOptions options) => - options.Protocol switch - { - OtlpExportProtocol.Grpc => new OtlpGrpcMetricsExportClient(options), - OtlpExportProtocol.HttpProtobuf => new OtlpHttpMetricsExportClient( - options, - options.HttpClientFactory?.Invoke() ?? throw new InvalidOperationException("OtlpExporterOptions was missing HttpClientFactory or it returned null.")), - _ => throw new NotSupportedException($"Protocol {options.Protocol} is not supported."), - }; - public static void TryEnableIHttpClientFactoryIntegration(this OtlpExporterOptions options, IServiceProvider serviceProvider, string httpClientName) { if (serviceProvider != null diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporter.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporter.cs index 7ef66c9bd52..97d07468311 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporter.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpLogExporter.cs @@ -19,7 +19,7 @@ public sealed class OtlpLogExporter : BaseExporter { private readonly SdkLimitOptions sdkLimitOptions; private readonly ExperimentalOptions experimentalOptions; - private readonly ProtobufOtlpExporterTransmissionHandler transmissionHandler; + private readonly OtlpExporterTransmissionHandler transmissionHandler; private readonly int startWritePosition; private Resource? resource; @@ -44,12 +44,12 @@ public OtlpLogExporter(OtlpExporterOptions options) /// . /// . /// . - /// . + /// . 
internal OtlpLogExporter( OtlpExporterOptions exporterOptions, SdkLimitOptions sdkLimitOptions, ExperimentalOptions experimentalOptions, - ProtobufOtlpExporterTransmissionHandler? transmissionHandler = null) + OtlpExporterTransmissionHandler? transmissionHandler = null) { Debug.Assert(exporterOptions != null, "exporterOptions was null"); Debug.Assert(sdkLimitOptions != null, "sdkLimitOptions was null"); diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpMetricExporter.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpMetricExporter.cs index aef2bdc4fe3..1014c1c2edf 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpMetricExporter.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpMetricExporter.cs @@ -1,12 +1,13 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +using System.Buffers.Binary; using System.Diagnostics; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; +using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; using OpenTelemetry.Metrics; -using OtlpCollector = OpenTelemetry.Proto.Collector.Metrics.V1; -using OtlpResource = OpenTelemetry.Proto.Resource.V1; +using OpenTelemetry.Resources; namespace OpenTelemetry.Exporter; @@ -16,9 +17,15 @@ namespace OpenTelemetry.Exporter; /// public class OtlpMetricExporter : BaseExporter { - private readonly OtlpExporterTransmissionHandler transmissionHandler; + private readonly OtlpExporterTransmissionHandler transmissionHandler; + private readonly int startWritePosition; - private OtlpResource.Resource? processResource; + private Resource? resource; + + // Initial buffer size set to ~732KB. + // This choice allows us to gradually grow the buffer while targeting a final capacity of around 100 MB, + // by the 7th doubling to maintain efficient allocation without frequent resizing. + private byte[] buffer = new byte[750000]; /// /// Initializes a new instance of the class. @@ -34,19 +41,20 @@ public OtlpMetricExporter(OtlpExporterOptions options) /// /// . /// . - /// . + /// . internal OtlpMetricExporter( OtlpExporterOptions exporterOptions, ExperimentalOptions experimentalOptions, - OtlpExporterTransmissionHandler? transmissionHandler = null) + OtlpExporterTransmissionHandler? transmissionHandler = null) { Debug.Assert(exporterOptions != null, "exporterOptions was null"); Debug.Assert(experimentalOptions != null, "experimentalOptions was null"); - this.transmissionHandler = transmissionHandler ?? exporterOptions!.GetMetricsExportTransmissionHandler(experimentalOptions!); + this.startWritePosition = exporterOptions!.Protocol == OtlpExportProtocol.Grpc ? 5 : 0; + this.transmissionHandler = transmissionHandler ?? exporterOptions!.GetProtobufExportTransmissionHandler(experimentalOptions!, OtlpSignalType.Metrics); } - internal OtlpResource.Resource ProcessResource => this.processResource ??= this.ParentProvider.GetResource().ToOtlpResource(); + internal Resource Resource => this.resource ??= this.ParentProvider.GetResource(); /// public override ExportResult Export(in Batch metrics) @@ -54,33 +62,59 @@ public override ExportResult Export(in Batch metrics) // Prevents the exporter's gRPC and HTTP operations from being instrumented. 
using var scope = SuppressInstrumentationScope.Begin(); - var request = new OtlpCollector.ExportMetricsServiceRequest(); - try { - request.AddMetrics(this.ProcessResource, metrics); + int writePosition = ProtobufOtlpMetricSerializer.WriteMetricsData(this.buffer, this.startWritePosition, this.Resource, metrics); + + if (this.startWritePosition == 5) + { + // Grpc payload consists of 3 parts + // byte 0 - Specifying if the payload is compressed. + // 1-4 byte - Specifies the length of payload in big endian format. + // 5 and above - Protobuf serialized data. + Span data = new Span(this.buffer, 1, 4); + var dataLength = writePosition - 5; + BinaryPrimitives.WriteUInt32BigEndian(data, (uint)dataLength); + } - if (!this.transmissionHandler.TrySubmitRequest(request)) + if (!this.transmissionHandler.TrySubmitRequest(this.buffer, writePosition)) { return ExportResult.Failure; } } + catch (IndexOutOfRangeException) + { + if (!this.IncreaseBufferSize()) + { + throw; + } + } catch (Exception ex) { OpenTelemetryProtocolExporterEventSource.Log.ExportMethodException(ex); return ExportResult.Failure; } - finally - { - request.Return(); - } return ExportResult.Success; } /// - protected override bool OnShutdown(int timeoutMilliseconds) + protected override bool OnShutdown(int timeoutMilliseconds) => this.transmissionHandler.Shutdown(timeoutMilliseconds); + + // TODO: Consider moving this to a shared utility class. + private bool IncreaseBufferSize() { - return this.transmissionHandler.Shutdown(timeoutMilliseconds); + var newBufferSize = this.buffer.Length * 2; + + if (newBufferSize > 100 * 1024 * 1024) + { + return false; + } + + var newBuffer = new byte[newBufferSize]; + this.buffer.CopyTo(newBuffer, 0); + this.buffer = newBuffer; + + return true; } } diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpMetricExporterExtensions.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpMetricExporterExtensions.cs index ab42b025478..1a8ef2f1b42 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpMetricExporterExtensions.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpMetricExporterExtensions.cs @@ -175,16 +175,7 @@ internal static MetricReader BuildOtlpExporterMetricReader( exporterOptions!.TryEnableIHttpClientFactoryIntegration(serviceProvider!, "OtlpMetricExporter"); - BaseExporter metricExporter; - - if (experimentalOptions != null && experimentalOptions.UseCustomProtobufSerializer) - { - metricExporter = new ProtobufOtlpMetricExporter(exporterOptions!, experimentalOptions!); - } - else - { - metricExporter = new OtlpMetricExporter(exporterOptions!, experimentalOptions!); - } + BaseExporter metricExporter = new OtlpMetricExporter(exporterOptions!, experimentalOptions!); if (configureExporterInstance != null) { diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpTraceExporter.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpTraceExporter.cs index bc7e062f648..a9cf0eb0477 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpTraceExporter.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/OtlpTraceExporter.cs @@ -17,7 +17,7 @@ namespace OpenTelemetry.Exporter; public class OtlpTraceExporter : BaseExporter { private readonly SdkLimitOptions sdkLimitOptions; - private readonly ProtobufOtlpExporterTransmissionHandler transmissionHandler; + private readonly OtlpExporterTransmissionHandler transmissionHandler; private readonly int startWritePosition; private Resource? 
resource; @@ -42,12 +42,12 @@ public OtlpTraceExporter(OtlpExporterOptions options) /// . /// . /// . - /// . + /// . internal OtlpTraceExporter( OtlpExporterOptions exporterOptions, SdkLimitOptions sdkLimitOptions, ExperimentalOptions experimentalOptions, - ProtobufOtlpExporterTransmissionHandler? transmissionHandler = null) + OtlpExporterTransmissionHandler? transmissionHandler = null) { Debug.Assert(exporterOptions != null, "exporterOptions was null"); Debug.Assert(sdkLimitOptions != null, "sdkLimitOptions was null"); diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/ProtobufOtlpMetricExporter.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/ProtobufOtlpMetricExporter.cs deleted file mode 100644 index 145e03e00ed..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/ProtobufOtlpMetricExporter.cs +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using System.Buffers.Binary; -using System.Diagnostics; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; -using OpenTelemetry.Metrics; -using OpenTelemetry.Resources; - -namespace OpenTelemetry.Exporter; - -/// -/// Exporter consuming and exporting the data using -/// the OpenTelemetry protocol (OTLP). -/// -internal sealed class ProtobufOtlpMetricExporter : BaseExporter -{ - private readonly ProtobufOtlpExporterTransmissionHandler transmissionHandler; - private readonly int startWritePosition; - - private Resource? resource; - - // Initial buffer size set to ~732KB. - // This choice allows us to gradually grow the buffer while targeting a final capacity of around 100 MB, - // by the 7th doubling to maintain efficient allocation without frequent resizing. - private byte[] buffer = new byte[750000]; - - /// - /// Initializes a new instance of the class. - /// - /// Configuration options for the exporter. - public ProtobufOtlpMetricExporter(OtlpExporterOptions options) - : this(options, experimentalOptions: new(), transmissionHandler: null) - { - } - - /// - /// Initializes a new instance of the class. - /// - /// . - /// . - /// . - internal ProtobufOtlpMetricExporter( - OtlpExporterOptions exporterOptions, - ExperimentalOptions experimentalOptions, - ProtobufOtlpExporterTransmissionHandler? transmissionHandler = null) - { - Debug.Assert(exporterOptions != null, "exporterOptions was null"); - Debug.Assert(experimentalOptions != null, "experimentalOptions was null"); - - this.startWritePosition = exporterOptions!.Protocol == OtlpExportProtocol.Grpc ? 5 : 0; - this.transmissionHandler = transmissionHandler ?? exporterOptions!.GetProtobufExportTransmissionHandler(experimentalOptions!, OtlpSignalType.Metrics); - } - - internal Resource Resource => this.resource ??= this.ParentProvider.GetResource(); - - /// - public override ExportResult Export(in Batch metrics) - { - // Prevents the exporter's gRPC and HTTP operations from being instrumented. - using var scope = SuppressInstrumentationScope.Begin(); - - try - { - int writePosition = ProtobufOtlpMetricSerializer.WriteMetricsData(this.buffer, this.startWritePosition, this.Resource, metrics); - - if (this.startWritePosition == 5) - { - // Grpc payload consists of 3 parts - // byte 0 - Specifying if the payload is compressed. - // 1-4 byte - Specifies the length of payload in big endian format. 
- // 5 and above - Protobuf serialized data. - Span data = new Span(this.buffer, 1, 4); - var dataLength = writePosition - 5; - BinaryPrimitives.WriteUInt32BigEndian(data, (uint)dataLength); - } - - if (!this.transmissionHandler.TrySubmitRequest(this.buffer, writePosition)) - { - return ExportResult.Failure; - } - } - catch (IndexOutOfRangeException) - { - if (!this.IncreaseBufferSize()) - { - throw; - } - } - catch (Exception ex) - { - OpenTelemetryProtocolExporterEventSource.Log.ExportMethodException(ex); - return ExportResult.Failure; - } - - return ExportResult.Success; - } - - /// - protected override bool OnShutdown(int timeoutMilliseconds) => this.transmissionHandler.Shutdown(timeoutMilliseconds); - - // TODO: Consider moving this to a shared utility class. - private bool IncreaseBufferSize() - { - var newBufferSize = this.buffer.Length * 2; - - if (newBufferSize > 100 * 1024 * 1024) - { - return false; - } - - var newBuffer = new byte[newBufferSize]; - this.buffer.CopyTo(newBuffer, 0); - this.buffer = newBuffer; - - return true; - } -} From cb52d26ccd284bae73a8bb88de74c7c901c6b73b Mon Sep 17 00:00:00 2001 From: Rajkumar Rangaraj Date: Thu, 28 Nov 2024 13:04:51 -0800 Subject: [PATCH 03/10] Update Benchmarks --- test/Benchmarks/Benchmarks.csproj | 14 + .../Exporter/OtlpGrpcExporterBenchmarks.cs | 2 +- .../Exporter/OtlpHttpExporterBenchmarks.cs | 2 +- .../Exporter/OtlpLogExporterBenchmarks.cs | 2 +- .../Exporter/OtlpTraceExporterBenchmarks.cs | 2 +- .../google/rpc/error_details.proto | 37 + .../Implementation/google/rpc/status.proto | 42 ++ .../opentelemetry/proto/collector/README.md | 10 + .../collector/logs/v1/logs_service.proto | 79 ++ .../collector/logs/v1/logs_service_http.yaml | 9 + .../metrics/v1/metrics_service.proto | 79 ++ .../metrics/v1/metrics_service_http.yaml | 9 + .../v1experimental/profiles_service.proto | 78 ++ .../v1experimental/profiles_service_http.yaml | 9 + .../collector/trace/v1/trace_service.proto | 79 ++ .../trace/v1/trace_service_http.yaml | 9 + .../proto/common/v1/common.proto | 81 ++ .../opentelemetry/proto/logs/v1/logs.proto | 211 ++++++ .../proto/metrics/v1/metrics.proto | 691 ++++++++++++++++++ .../v1experimental/pprofextended.proto | 388 ++++++++++ .../profiles/v1experimental/profiles.proto | 191 +++++ .../proto/resource/v1/resource.proto | 37 + .../opentelemetry/proto/trace/v1/trace.proto | 355 +++++++++ test/Benchmarks/TestTraceServiceClient.cs | 17 - 24 files changed, 2412 insertions(+), 21 deletions(-) create mode 100644 test/Benchmarks/Implementation/google/rpc/error_details.proto create mode 100644 test/Benchmarks/Implementation/google/rpc/status.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/README.md create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto create 
mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/common/v1/common.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/logs/v1/logs.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/metrics/v1/metrics.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/resource/v1/resource.proto create mode 100644 test/Benchmarks/Implementation/opentelemetry/proto/trace/v1/trace.proto delete mode 100644 test/Benchmarks/TestTraceServiceClient.cs diff --git a/test/Benchmarks/Benchmarks.csproj b/test/Benchmarks/Benchmarks.csproj index d3b36601264..7e4eb3961e5 100644 --- a/test/Benchmarks/Benchmarks.csproj +++ b/test/Benchmarks/Benchmarks.csproj @@ -3,6 +3,7 @@ Exe $(TargetFrameworksForTests) + $(NoWarn);SA1518;SA1210 @@ -27,4 +28,17 @@ + + + + + + + + + + Implementation + + + diff --git a/test/Benchmarks/Exporter/OtlpGrpcExporterBenchmarks.cs b/test/Benchmarks/Exporter/OtlpGrpcExporterBenchmarks.cs index ba7163a179a..7d880a04ddf 100644 --- a/test/Benchmarks/Exporter/OtlpGrpcExporterBenchmarks.cs +++ b/test/Benchmarks/Exporter/OtlpGrpcExporterBenchmarks.cs @@ -35,7 +35,7 @@ public void GlobalSetup() options, new SdkLimitOptions(), new ExperimentalOptions(), - new ProtobufOtlpExporterTransmissionHandler(new ProtobufOtlpGrpcExportClient(options, options.HttpClientFactory(), "opentelemetry.proto.collector.trace.v1.TraceService/Export"), options.TimeoutMilliseconds)); + new OtlpExporterTransmissionHandler(new OtlpGrpcExportClient(options, options.HttpClientFactory(), "opentelemetry.proto.collector.trace.v1.TraceService/Export"), options.TimeoutMilliseconds)); this.activity = ActivityHelper.CreateTestActivity(); this.activityBatch = new CircularBuffer(this.NumberOfSpans); diff --git a/test/Benchmarks/Exporter/OtlpHttpExporterBenchmarks.cs b/test/Benchmarks/Exporter/OtlpHttpExporterBenchmarks.cs index 9603c147ac4..02953143bcb 100644 --- a/test/Benchmarks/Exporter/OtlpHttpExporterBenchmarks.cs +++ b/test/Benchmarks/Exporter/OtlpHttpExporterBenchmarks.cs @@ -63,7 +63,7 @@ public void GlobalSetup() options, new SdkLimitOptions(), new ExperimentalOptions(), - new ProtobufOtlpExporterTransmissionHandler(new ProtobufOtlpHttpExportClient(options, options.HttpClientFactory(), "v1/traces"), options.TimeoutMilliseconds)); + new OtlpExporterTransmissionHandler(new OtlpHttpExportClient(options, options.HttpClientFactory(), "v1/traces"), options.TimeoutMilliseconds)); this.activity = ActivityHelper.CreateTestActivity(); this.activityBatch = new CircularBuffer(this.NumberOfSpans); diff --git a/test/Benchmarks/Exporter/OtlpLogExporterBenchmarks.cs b/test/Benchmarks/Exporter/OtlpLogExporterBenchmarks.cs index 1ec5f4770d4..8dc435b8fd8 100644 --- a/test/Benchmarks/Exporter/OtlpLogExporterBenchmarks.cs +++ b/test/Benchmarks/Exporter/OtlpLogExporterBenchmarks.cs @@ -16,7 +16,7 @@ using OpenTelemetry.Logs; using OpenTelemetry.Tests; using OpenTelemetryProtocol::OpenTelemetry.Exporter; -using OtlpCollector = OpenTelemetryProtocol::OpenTelemetry.Proto.Collector.Logs.V1; +using OtlpCollector = OpenTelemetry.Proto.Collector.Logs.V1; /* BenchmarkDotNet v0.13.6, Windows 11 (10.0.22621.2134/22H2/2022Update/SunValley2) (Hyper-V) 
diff --git a/test/Benchmarks/Exporter/OtlpTraceExporterBenchmarks.cs b/test/Benchmarks/Exporter/OtlpTraceExporterBenchmarks.cs index 5f338fa6189..cadbe1e89c4 100644 --- a/test/Benchmarks/Exporter/OtlpTraceExporterBenchmarks.cs +++ b/test/Benchmarks/Exporter/OtlpTraceExporterBenchmarks.cs @@ -16,7 +16,7 @@ using OpenTelemetry.Internal; using OpenTelemetry.Tests; using OpenTelemetryProtocol::OpenTelemetry.Exporter; -using OtlpCollector = OpenTelemetryProtocol::OpenTelemetry.Proto.Collector.Trace.V1; +using OtlpCollector = OpenTelemetry.Proto.Collector.Trace.V1; /* BenchmarkDotNet v0.13.6, Windows 11 (10.0.22621.2134/22H2/2022Update/SunValley2) (Hyper-V) diff --git a/test/Benchmarks/Implementation/google/rpc/error_details.proto b/test/Benchmarks/Implementation/google/rpc/error_details.proto new file mode 100644 index 00000000000..0d5461a7287 --- /dev/null +++ b/test/Benchmarks/Implementation/google/rpc/error_details.proto @@ -0,0 +1,37 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/duration.proto"; + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. +message RetryInfo { + // Clients should wait at least this long between retrying the same request. + google.protobuf.Duration retry_delay = 1; +} diff --git a/test/Benchmarks/Implementation/google/rpc/status.proto b/test/Benchmarks/Implementation/google/rpc/status.proto new file mode 100644 index 00000000000..40c5bf318c2 --- /dev/null +++ b/test/Benchmarks/Implementation/google/rpc/status.proto @@ -0,0 +1,42 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
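The RetryInfo comment above leaves the backoff policy to the client: wait at least retry_delay before the first retry, then increase the delay exponentially up to some cap. The C# sketch below is a rough illustration of such a policy, not part of this change; the doubling factor, 30-second cap, and attempt count are arbitrary example values.

using System;

internal static class RetryBackoffExample
{
    // Builds a bounded exponential backoff schedule from a server-provided
    // initial delay (google.rpc.RetryInfo.retry_delay). Illustrative only.
    public static TimeSpan[] BuildSchedule(TimeSpan retryDelay, int maxAttempts = 5)
    {
        var maxDelay = TimeSpan.FromSeconds(30); // example cap, not defined by the protocol
        var schedule = new TimeSpan[maxAttempts];
        var delay = retryDelay;

        for (int i = 0; i < maxAttempts; i++)
        {
            schedule[i] = delay < maxDelay ? delay : maxDelay;
            delay = TimeSpan.FromTicks(delay.Ticks * 2); // double the wait between retries
        }

        return schedule;
    }

    public static void Main()
    {
        // e.g. the server asked the client to wait at least 250 ms before retrying.
        foreach (var wait in BuildSchedule(TimeSpan.FromMilliseconds(250)))
        {
            Console.WriteLine(wait);
        }
    }
}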
+ +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/any.proto"; + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +message Status { + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + repeated google.protobuf.Any details = 3; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/README.md b/test/Benchmarks/Implementation/opentelemetry/proto/collector/README.md new file mode 100644 index 00000000000..f82dbb0278b --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/README.md @@ -0,0 +1,10 @@ +# OpenTelemetry Collector Proto + +This package describes the OpenTelemetry collector protocol. + +## Packages + +1. `common` package contains the common messages shared between different services. +2. `trace` package contains the Trace Service protos. +3. `metrics` package contains the Metrics Service protos. +4. `logs` package contains the Logs Service protos. diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto b/test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto new file mode 100644 index 00000000000..8260d8aaeb8 --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto @@ -0,0 +1,79 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.collector.logs.v1; + +import "opentelemetry/proto/logs/v1/logs.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Logs.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.logs.v1"; +option java_outer_classname = "LogsServiceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/logs/v1"; + +// Service that can be used to push logs between one Application instrumented with +// OpenTelemetry and an collector, or between an collector and a central collector (in this +// case logs are sent/received to/from multiple Applications). 
+service LogsService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportLogsServiceRequest) returns (ExportLogsServiceResponse) {} +} + +message ExportLogsServiceRequest { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated opentelemetry.proto.logs.v1.ResourceLogs resource_logs = 1; +} + +message ExportLogsServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportLogsPartialSuccess partial_success = 1; +} + +message ExportLogsPartialSuccess { + // The number of rejected log records. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_log_records = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + string error_message = 2; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml b/test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml new file mode 100644 index 00000000000..507473b9b3b --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.logs.v1.LogsService.Export + post: /v1/logs + body: "*" \ No newline at end of file diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto b/test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto new file mode 100644 index 00000000000..dd48f1ad3a1 --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto @@ -0,0 +1,79 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.collector.metrics.v1; + +import "opentelemetry/proto/metrics/v1/metrics.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Metrics.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.metrics.v1"; +option java_outer_classname = "MetricsServiceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/metrics/v1"; + +// Service that can be used to push metrics between one Application +// instrumented with OpenTelemetry and a collector, or between a collector and a +// central collector. +service MetricsService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportMetricsServiceRequest) returns (ExportMetricsServiceResponse) {} +} + +message ExportMetricsServiceRequest { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated opentelemetry.proto.metrics.v1.ResourceMetrics resource_metrics = 1; +} + +message ExportMetricsServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportMetricsPartialSuccess partial_success = 1; +} + +message ExportMetricsPartialSuccess { + // The number of rejected data points. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_data_points = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. 
+ string error_message = 2; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml b/test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml new file mode 100644 index 00000000000..a5456502607 --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.metrics.v1.MetricsService.Export + post: /v1/metrics + body: "*" \ No newline at end of file diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto b/test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto new file mode 100644 index 00000000000..d0e7894b29c --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto @@ -0,0 +1,78 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.collector.profiles.v1experimental; + +import "opentelemetry/proto/profiles/v1experimental/profiles.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.profiles.v1experimental"; +option java_outer_classname = "ProfilesServiceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental"; + +// Service that can be used to push profiles between one Application instrumented with +// OpenTelemetry and a collector, or between a collector and a central collector. +service ProfilesService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportProfilesServiceRequest) returns (ExportProfilesServiceResponse) {} +} + +message ExportProfilesServiceRequest { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated opentelemetry.proto.profiles.v1experimental.ResourceProfiles resource_profiles = 1; +} + +message ExportProfilesServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. 
when the server accepts only parts of the data and rejects the rest)
+  // the server MUST initialize the `partial_success` field and MUST
+  // set the `rejected_<signal>` with the number of items it rejected.
+  //
+  // Servers MAY also make use of the `partial_success` field to convey
+  // warnings/suggestions to senders even when the request was fully accepted.
+  // In such cases, the `rejected_<signal>` MUST have a value of `0` and
+  // the `error_message` MUST be non-empty.
+  //
+  // A `partial_success` message with an empty value (rejected_<signal> = 0 and
+  // `error_message` = "") is equivalent to it not being set/present. Senders
+  // SHOULD interpret it the same way as in the full success case.
+  ExportProfilesPartialSuccess partial_success = 1;
+}
+
+message ExportProfilesPartialSuccess {
+  // The number of rejected profiles.
+  //
+  // A `rejected_<signal>` field holding a `0` value indicates that the
+  // request was fully accepted.
+  int64 rejected_profiles = 1;
+
+  // A developer-facing human-readable message in English. It should be used
+  // either to explain why the server rejected parts of the data during a partial
+  // success or to convey warnings/suggestions during a full success. The message
+  // should offer guidance on how users can address such issues.
+  //
+  // error_message is an optional field. An error_message with an empty value
+  // is equivalent to it not being set.
+  string error_message = 2;
+}
diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml b/test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml
new file mode 100644
index 00000000000..ea501afbbb2
--- /dev/null
+++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml
@@ -0,0 +1,9 @@
+# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the
+# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway.
+type: google.api.Service
+config_version: 3
+http:
+  rules:
+  - selector: opentelemetry.proto.collector.profiles.v1experimental.ProfilesService.Export
+    post: /v1experimental/profiles
+    body: "*"
diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto b/test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto
new file mode 100644
index 00000000000..d6fe67f9e55
--- /dev/null
+++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto
@@ -0,0 +1,79 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
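The Export*ServiceResponse messages vendored above (logs, metrics, profiles, and trace below) all share the same `partial_success` contract. As a rough illustration only, a sender might act on that contract as in the following C# sketch; the `PartialSuccess` record and `Interpret` method are hypothetical stand-ins for the protobuf-generated types and are not part of this patch.

```csharp
using System;

// Hypothetical stand-in for a generated Export*PartialSuccess message.
internal sealed record PartialSuccess(long RejectedItems, string ErrorMessage);

internal static class PartialSuccessHandling
{
    public static void Interpret(PartialSuccess? partialSuccess)
    {
        // An absent message, or rejected_<signal> = 0 with an empty error_message,
        // is equivalent to a full success.
        if (partialSuccess is null ||
            (partialSuccess.RejectedItems == 0 && string.IsNullOrEmpty(partialSuccess.ErrorMessage)))
        {
            Console.WriteLine("Export fully accepted.");
        }
        else if (partialSuccess.RejectedItems == 0)
        {
            // rejected_<signal> = 0 with a non-empty message conveys a warning on a full success.
            Console.WriteLine($"Export accepted with warning: {partialSuccess.ErrorMessage}");
        }
        else
        {
            Console.WriteLine($"Export partially accepted; {partialSuccess.RejectedItems} items rejected: {partialSuccess.ErrorMessage}");
        }
    }
}
```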
+
+syntax = "proto3";
+
+package opentelemetry.proto.collector.trace.v1;
+
+import "opentelemetry/proto/trace/v1/trace.proto";
+
+option csharp_namespace = "OpenTelemetry.Proto.Collector.Trace.V1";
+option java_multiple_files = true;
+option java_package = "io.opentelemetry.proto.collector.trace.v1";
+option java_outer_classname = "TraceServiceProto";
+option go_package = "go.opentelemetry.io/proto/otlp/collector/trace/v1";
+
+// Service that can be used to push spans between one Application instrumented with
+// OpenTelemetry and a collector, or between a collector and a central collector (in this
+// case spans are sent/received to/from multiple Applications).
+service TraceService {
+  // For performance reasons, it is recommended to keep this RPC
+  // alive for the entire life of the application.
+  rpc Export(ExportTraceServiceRequest) returns (ExportTraceServiceResponse) {}
+}
+
+message ExportTraceServiceRequest {
+  // An array of ResourceSpans.
+  // For data coming from a single resource this array will typically contain one
+  // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+  // data from multiple origins typically batch the data before forwarding further and
+  // in that case this array will contain multiple elements.
+  repeated opentelemetry.proto.trace.v1.ResourceSpans resource_spans = 1;
+}
+
+message ExportTraceServiceResponse {
+  // The details of a partially successful export request.
+  //
+  // If the request is only partially accepted
+  // (i.e. when the server accepts only parts of the data and rejects the rest)
+  // the server MUST initialize the `partial_success` field and MUST
+  // set the `rejected_<signal>` with the number of items it rejected.
+  //
+  // Servers MAY also make use of the `partial_success` field to convey
+  // warnings/suggestions to senders even when the request was fully accepted.
+  // In such cases, the `rejected_<signal>` MUST have a value of `0` and
+  // the `error_message` MUST be non-empty.
+  //
+  // A `partial_success` message with an empty value (rejected_<signal> = 0 and
+  // `error_message` = "") is equivalent to it not being set/present. Senders
+  // SHOULD interpret it the same way as in the full success case.
+  ExportTracePartialSuccess partial_success = 1;
+}
+
+message ExportTracePartialSuccess {
+  // The number of rejected spans.
+  //
+  // A `rejected_<signal>` field holding a `0` value indicates that the
+  // request was fully accepted.
+  int64 rejected_spans = 1;
+
+  // A developer-facing human-readable message in English. It should be used
+  // either to explain why the server rejected parts of the data during a partial
+  // success or to convey warnings/suggestions during a full success. The message
+  // should offer guidance on how users can address such issues.
+  //
+  // error_message is an optional field. An error_message with an empty value
+  // is equivalent to it not being set.
+  string error_message = 2;
+}
diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml b/test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml
new file mode 100644
index 00000000000..d091b3a8d53
--- /dev/null
+++ b/test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml
@@ -0,0 +1,9 @@
+# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the
+# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway.
+type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.trace.v1.TraceService.Export + post: /v1/traces + body: "*" diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/common/v1/common.proto b/test/Benchmarks/Implementation/opentelemetry/proto/common/v1/common.proto new file mode 100644 index 00000000000..ff8a21a1fa0 --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/common/v1/common.proto @@ -0,0 +1,81 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.common.v1; + +option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.common.v1"; +option java_outer_classname = "CommonProto"; +option go_package = "go.opentelemetry.io/proto/otlp/common/v1"; + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +message AnyValue { + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + oneof value { + string string_value = 1; + bool bool_value = 2; + int64 int_value = 3; + double double_value = 4; + ArrayValue array_value = 5; + KeyValueList kvlist_value = 6; + bytes bytes_value = 7; + } +} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +message ArrayValue { + // Array of values. The array may be empty (contain 0 elements). + repeated AnyValue values = 1; +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +message KeyValueList { + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + repeated KeyValue values = 1; +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. +message KeyValue { + string key = 1; + AnyValue value = 2; +} + +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +message InstrumentationScope { + // An empty instrumentation scope name means the name is unknown. + string name = 1; + string version = 2; + + // Additional attributes that describe the scope. [Optional]. 
+ // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated KeyValue attributes = 3; + uint32 dropped_attributes_count = 4; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/logs/v1/logs.proto b/test/Benchmarks/Implementation/opentelemetry/proto/logs/v1/logs.proto new file mode 100644 index 00000000000..f9b97dd7451 --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/logs/v1/logs.proto @@ -0,0 +1,211 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.logs.v1; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Logs.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.logs.v1"; +option java_outer_classname = "LogsProto"; +option go_package = "go.opentelemetry.io/proto/otlp/logs/v1"; + +// LogsData represents the logs data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP logs data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message LogsData { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceLogs resource_logs = 1; +} + +// A collection of ScopeLogs from a Resource. +message ResourceLogs { + reserved 1000; + + // The resource for the logs in this message. + // If this field is not set then resource info is unknown. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of ScopeLogs that originate from a resource. + repeated ScopeLogs scope_logs = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_logs" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Logs produced by a Scope. +message ScopeLogs { + // The instrumentation scope information for the logs in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of log records. 
+ repeated LogRecord log_records = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the log data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all logs in the "logs" field. + string schema_url = 3; +} + +// Possible values for LogRecord.SeverityNumber. +enum SeverityNumber { + // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. + SEVERITY_NUMBER_UNSPECIFIED = 0; + SEVERITY_NUMBER_TRACE = 1; + SEVERITY_NUMBER_TRACE2 = 2; + SEVERITY_NUMBER_TRACE3 = 3; + SEVERITY_NUMBER_TRACE4 = 4; + SEVERITY_NUMBER_DEBUG = 5; + SEVERITY_NUMBER_DEBUG2 = 6; + SEVERITY_NUMBER_DEBUG3 = 7; + SEVERITY_NUMBER_DEBUG4 = 8; + SEVERITY_NUMBER_INFO = 9; + SEVERITY_NUMBER_INFO2 = 10; + SEVERITY_NUMBER_INFO3 = 11; + SEVERITY_NUMBER_INFO4 = 12; + SEVERITY_NUMBER_WARN = 13; + SEVERITY_NUMBER_WARN2 = 14; + SEVERITY_NUMBER_WARN3 = 15; + SEVERITY_NUMBER_WARN4 = 16; + SEVERITY_NUMBER_ERROR = 17; + SEVERITY_NUMBER_ERROR2 = 18; + SEVERITY_NUMBER_ERROR3 = 19; + SEVERITY_NUMBER_ERROR4 = 20; + SEVERITY_NUMBER_FATAL = 21; + SEVERITY_NUMBER_FATAL2 = 22; + SEVERITY_NUMBER_FATAL3 = 23; + SEVERITY_NUMBER_FATAL4 = 24; +} + +// LogRecordFlags represents constants used to interpret the +// LogRecord.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) +// +enum LogRecordFlags { + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + LOG_RECORD_FLAGS_DO_NOT_USE = 0; + + // Bits 0-7 are used for trace flags. + LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; + + // Bits 8-31 are reserved for future use. +} + +// A log record according to OpenTelemetry Log Data Model: +// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md +message LogRecord { + reserved 4; + + // time_unix_nano is the time when the event occurred. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + fixed64 time_unix_nano = 1; + + // Time when the event was observed by the collection system. + // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) + // this timestamp is typically set at the generation time and is equal to Timestamp. + // For events originating externally and collected by OpenTelemetry (e.g. using + // Collector) this is the time when OpenTelemetry's code observed the event measured + // by the clock of the OpenTelemetry code. This field MUST be set once the event is + // observed by OpenTelemetry. + // + // For converting OpenTelemetry log data to formats that support only one timestamp or + // when receiving OpenTelemetry log data by recipients that support only one timestamp + // internally the following logic is recommended: + // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + fixed64 observed_time_unix_nano = 11; + + // Numerical value of the severity, normalized to values described in Log Data Model. + // [Optional]. 
+ SeverityNumber severity_number = 2; + + // The severity text (also known as log level). The original string representation as + // it is known at the source. [Optional]. + string severity_text = 3; + + // A value containing the body of the log record. Can be for example a human-readable + // string message (including multi-line) describing the event in a free form or it can + // be a structured data composed of arrays and maps of other values. [Optional]. + opentelemetry.proto.common.v1.AnyValue body = 5; + + // Additional attributes that describe the specific event occurrence. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 6; + uint32 dropped_attributes_count = 7; + + // Flags, a bit field. 8 least significant bits are the trace flags as + // defined in W3C Trace Context specification. 24 most significant bits are reserved + // and must be set to 0. Readers must not assume that 24 most significant bits + // will be zero and must correctly mask the bits when reading 8-bit trace flag (use + // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. + fixed32 flags = 8; + + // A unique identifier for a trace. All logs from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is optional. + // + // The receivers SHOULD assume that the log record is not associated with a + // trace if any of the following is true: + // - the field is not present, + // - the field contains an invalid value. + bytes trace_id = 9; + + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is optional. If the sender specifies a valid span_id then it SHOULD also + // specify a valid trace_id. + // + // The receivers SHOULD assume that the log record is not associated with a + // span if any of the following is true: + // - the field is not present, + // - the field contains an invalid value. + bytes span_id = 10; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/metrics/v1/metrics.proto b/test/Benchmarks/Implementation/opentelemetry/proto/metrics/v1/metrics.proto new file mode 100644 index 00000000000..19bb7ff8d53 --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/metrics/v1/metrics.proto @@ -0,0 +1,691 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
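The LogRecordFlags enum in logs.proto above is intended to be applied as a bit mask over LogRecord.flags. A minimal sketch of that masking, assuming nothing beyond the constants defined in the proto (the class and method names here are illustrative and not part of this patch):

```csharp
using System;

internal static class LogRecordFlagsExample
{
    // Mirrors LOG_RECORD_FLAGS_TRACE_FLAGS_MASK from logs.proto (bits 0-7).
    private const uint TraceFlagsMask = 0x000000FF;

    public static byte GetTraceFlags(uint logRecordFlags)
    {
        // Readers must mask explicitly; the upper 24 bits are reserved and
        // cannot be assumed to be zero.
        return (byte)(logRecordFlags & TraceFlagsMask);
    }

    public static bool IsSampled(uint logRecordFlags)
    {
        // Bit 0 of the W3C trace flags is the "sampled" flag.
        return (GetTraceFlags(logRecordFlags) & 0x01) != 0;
    }
}
```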
+ +syntax = "proto3"; + +package opentelemetry.proto.metrics.v1; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.metrics.v1"; +option java_outer_classname = "MetricsProto"; +option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1"; + +// MetricsData represents the metrics data that can be stored in a persistent +// storage, OR can be embedded by other protocols that transfer OTLP metrics +// data but do not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message MetricsData { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceMetrics resource_metrics = 1; +} + +// A collection of ScopeMetrics from a Resource. +message ResourceMetrics { + reserved 1000; + + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of metrics that originate from a resource. + repeated ScopeMetrics scope_metrics = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_metrics" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Metrics produced by an Scope. +message ScopeMetrics { + // The instrumentation scope information for the metrics in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of metrics that originate from an instrumentation library. + repeated Metric metrics = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the metric data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all metrics in the "metrics" field. + string schema_url = 3; +} + +// Defines a Metric which has one or more timeseries. The following is a +// brief summary of the Metric data model. For more details, see: +// +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md +// +// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of a metadata and data. +// - Metadata part contains a name, description, unit. 
+// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). +// - DataPoint contains timestamps, attributes, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +------------------------------------+ +// |data |---> |Gauge, Sum, Histogram, Summary, ... | +// +------------+ +------------------------------------+ +// +// Data [One of Gauge, Sum, Histogram, Summary, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// Each distinct type of DataPoint represents the output of a specific +// aggregation function, the result of applying the DataPoint's +// associated function of to one or more measurements. +// +// All DataPoint types have three common fields: +// - Attributes includes key-value pairs associated with the data point +// - TimeUnixNano is required, set to the end time of the aggregation +// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints +// having an AggregationTemporality field, as discussed below. +// +// Both TimeUnixNano and StartTimeUnixNano values are expressed as +// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. +// +// # TimeUnixNano +// +// This field is required, having consistent interpretation across +// DataPoint types. TimeUnixNano is the moment corresponding to when +// the data point's aggregate value was captured. +// +// Data points with the 0 value for TimeUnixNano SHOULD be rejected +// by consumers. +// +// # StartTimeUnixNano +// +// StartTimeUnixNano in general allows detecting when a sequence of +// observations is unbroken. This field indicates to consumers the +// start time for points with cumulative and delta +// AggregationTemporality, and it should be included whenever possible +// to support correct rate calculation. Although it may be omitted +// when the start time is truly unknown, setting StartTimeUnixNano is +// strongly encouraged. +message Metric { + reserved 4, 6, 8; + + // name of the metric. + string name = 1; + + // description of the metric, which can be used in documentation. + string description = 2; + + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + string unit = 3; + + // Data determines the aggregation type (if any) of the metric, what is the + // reported value type for the data points, as well as the relatationship to + // the time interval over which they are reported. + oneof data { + Gauge gauge = 5; + Sum sum = 7; + Histogram histogram = 9; + ExponentialHistogram exponential_histogram = 10; + Summary summary = 11; + } + + // Additional metadata attributes that describe the metric. [Optional]. + // Attributes are non-identifying. + // Consumers SHOULD NOT need to be aware of these attributes. 
+ // These attributes MAY be used to encode information allowing + // for lossless roundtrip translation to / from another data model. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue metadata = 12; +} + +// Gauge represents the type of a scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +message Gauge { + repeated NumberDataPoint data_points = 1; +} + +// Sum represents the type of a scalar metric that is calculated as a sum of all +// reported measurements over a time interval. +message Sum { + repeated NumberDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; + + // If "true" means that the sum is monotonic. + bool is_monotonic = 3; +} + +// Histogram represents the type of a metric that is calculated by aggregating +// as a Histogram of all reported measurements over a time interval. +message Histogram { + repeated HistogramDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; +} + +// ExponentialHistogram represents the type of a metric that is calculated by aggregating +// as a ExponentialHistogram of all reported double measurements over a time interval. +message ExponentialHistogram { + repeated ExponentialHistogramDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) +// data type. These data points cannot always be merged in a meaningful way. +// While they can be useful in some applications, histogram data points are +// recommended for new applications. +message Summary { + repeated SummaryDataPoint data_points = 1; +} + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values. It describes how those values relate to the time interval over +// which they are aggregated. +enum AggregationTemporality { + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; + + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. 
There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. + AGGREGATION_TEMPORALITY_DELTA = 1; + + // CUMULATIVE is an AggregationTemporality for a metric aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_0+1 with a value of 1. + // + // Note: Even though, when reporting changes since last report time, using + // CUMULATIVE is valid, it is not recommended. This may cause problems for + // systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). + AGGREGATION_TEMPORALITY_CUMULATIVE = 2; +} + +// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a +// bit-field representing 32 distinct boolean flags. Each flag defined in this +// enum is a bit-mask. To test the presence of a single flag in the flags of +// a data point, for example, use an expression like: +// +// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK +// +enum DataPointFlags { + // The zero value for the enum. 
Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + DATA_POINT_FLAGS_DO_NOT_USE = 0; + + // This DataPoint is valid but has no recorded value. This value + // SHOULD be used to reflect explicitly missing data in a series, as + // for an equivalent to the Prometheus "staleness marker". + DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1; + + // Bits 2-31 are reserved for future use. +} + +// NumberDataPoint is a single data point in a timeseries that describes the +// time-varying scalar value of a metric. +message NumberDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // The value itself. A point is considered invalid when one of the recognized + // value fields is not present inside this oneof. + oneof value { + double as_double = 4; + sfixed64 as_int = 6; + } + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 5; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 8; +} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram. A Histogram contains summary statistics +// for a population of values, it may optionally contain the distribution of +// those values across a set of buckets. +// +// If the histogram contains the distribution of values, then both +// "explicit_bounds" and "bucket counts" fields must be defined. +// If the histogram does not contain the distribution of values, then both +// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and +// "sum" are known. +message HistogramDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be non-negative. This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + fixed64 count = 4; + + // sum of the values in the population. 
If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + optional double sum = 5; + + // bucket_counts is an optional field contains the count values of histogram + // for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be by one greater than + // the number of elements in explicit_bounds array. + repeated fixed64 bucket_counts = 6; + + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // + // The boundaries for bucket at index i are: + // + // (-infinity, explicit_bounds[i]] for i == 0 + // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) + // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) + // + // The values in the explicit_bounds array must be strictly increasing. + // + // Histogram buckets are inclusive of their upper boundary, except the last + // bucket where the boundary is at infinity. This format is intentionally + // compatible with the OpenMetrics histogram definition. + repeated double explicit_bounds = 7; + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 8; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 10; + + // min is the minimum value over (start_time, end_time]. + optional double min = 11; + + // max is the maximum value over (start_time, end_time]. + optional double max = 12; +} + +// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +// +message ExponentialHistogramDataPoint { + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be + // non-negative. This value must be equal to the sum of the "bucket_counts" + // values in the positive and negative Buckets plus the "zero_count" field. + fixed64 count = 4; + + // sum of the values in the population. If count is zero then this field + // must be zero. 
+ // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + optional double sum = 5; + + // scale describes the resolution of the histogram. Boundaries are + // located at powers of the base, where: + // + // base = (2^(2^-scale)) + // + // The histogram bucket identified by `index`, a signed integer, + // contains values that are greater than (base^index) and + // less than or equal to (base^(index+1)). + // + // The positive and negative ranges of the histogram are expressed + // separately. Negative values are mapped by their absolute value + // into the negative range using the same scale as the positive range. + // + // scale is not restricted by the protocol, as the permissible + // values depend on the range of the data. + sint32 scale = 6; + + // zero_count is the count of values that are either exactly zero or + // within the region considered zero by the instrumentation at the + // tolerated degree of precision. This bucket stores values that + // cannot be expressed using the standard exponential formula as + // well as values that have been rounded to zero. + // + // Implementations MAY consider the zero bucket to have probability + // mass equal to (zero_count / count). + fixed64 zero_count = 7; + + // positive carries the positive range of exponential bucket counts. + Buckets positive = 8; + + // negative carries the negative range of exponential bucket counts. + Buckets negative = 9; + + // Buckets are a set of bucket counts, encoded in a contiguous array + // of counts. + message Buckets { + // Offset is the bucket index of the first entry in the bucket_counts array. + // + // Note: This uses a varint encoding as a simple form of compression. + sint32 offset = 1; + + // bucket_counts is an array of count values, where bucket_counts[i] carries + // the count of the bucket at index (offset+i). bucket_counts[i] is the count + // of values greater than base^(offset+i) and less than or equal to + // base^(offset+i+1). + // + // Note: By contrast, the explicit HistogramDataPoint uses + // fixed64. This field is expected to have many buckets, + // especially zeros, so uint64 has been selected to ensure + // varint encoding. + repeated uint64 bucket_counts = 2; + } + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 10; + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 11; + + // min is the minimum value over (start_time, end_time]. + optional double min = 12; + + // max is the maximum value over (start_time, end_time]. + optional double max = 13; + + // ZeroThreshold may be optionally set to convey the width of the zero + // region. Where the zero region is defined as the closed interval + // [-ZeroThreshold, ZeroThreshold]. + // When ZeroThreshold is 0, zero count bucket stores values that cannot be + // expressed using the standard exponential formula as well as values that + // have been rounded to zero. 
+ double zero_threshold = 14; +} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +message SummaryDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be non-negative. + fixed64 count = 4; + + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary + double sum = 5; + + // Represents the value at a given quantile of a distribution. + // + // To record Min and Max values following conventions are used: + // - The 1.0 quantile is equivalent to the maximum value observed. + // - The 0.0 quantile is equivalent to the minimum value observed. + // + // See the following issue for more context: + // https://github.com/open-telemetry/opentelemetry-proto/issues/125 + message ValueAtQuantile { + // The quantile of a distribution. Must be in the interval + // [0.0, 1.0]. + double quantile = 1; + + // The value at the given quantile of a distribution. + // + // Quantile values must NOT be negative. + double value = 2; + } + + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + repeated ValueAtQuantile quantile_values = 6; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 8; +} + +// A representation of an exemplar, which is a sample input measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +message Exemplar { + reserved 1; + + // The set of key/value pairs that were filtered out by the aggregator, but + // recorded alongside the original measurement. Only key/value pairs that were + // filtered out by the aggregator should be included + repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7; + + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 2; + + // The value of the measurement that was recorded. 
An exemplar is + // considered invalid when one of the recognized value fields is not present + // inside this oneof. + oneof value { + double as_double = 3; + sfixed64 as_int = 6; + } + + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + bytes span_id = 4; + + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + bytes trace_id = 5; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto b/test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto new file mode 100644 index 00000000000..b5b5b88fcef --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto @@ -0,0 +1,388 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file includes work covered by the following copyright and permission notices: +// +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Profile is a common stacktrace profile format. +// +// Measurements represented with this format should follow the +// following conventions: +// +// - Consumers should treat unset optional fields as if they had been +// set with their default value. +// +// - When possible, measurements should be stored in "unsampled" form +// that is most useful to humans. There should be enough +// information present to determine the original sampled values. +// +// - On-disk, the serialized proto must be gzip-compressed. +// +// - The profile is represented as a set of samples, where each sample +// references a sequence of locations, and where each location belongs +// to a mapping. +// - There is a N->1 relationship from sample.location_id entries to +// locations. For every sample.location_id entry there must be a +// unique Location with that index. +// - There is an optional N->1 relationship from locations to +// mappings. For every nonzero Location.mapping_id there must be a +// unique Mapping with that index. 
+ +syntax = "proto3"; + +package opentelemetry.proto.profiles.v1experimental; + +import "opentelemetry/proto/common/v1/common.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.profiles.v1experimental"; +option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; + +// Represents a complete profile, including sample types, samples, +// mappings to binaries, locations, functions, string table, and additional metadata. +message Profile { + // A description of the samples associated with each Sample.value. + // For a cpu profile this might be: + // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] + // For a heap profile, this might be: + // [["allocations","count"], ["space","bytes"]], + // If one of the values represents the number of events represented + // by the sample, by convention it should be at index 0 and use + // sample_type.unit == "count". + repeated ValueType sample_type = 1; + // The set of samples recorded in this profile. + repeated Sample sample = 2; + // Mapping from address ranges to the image/binary/library mapped + // into that address range. mapping[0] will be the main binary. + repeated Mapping mapping = 3; + // Locations referenced by samples via location_indices. + repeated Location location = 4; + // Array of locations referenced by samples. + repeated int64 location_indices = 15; + // Functions referenced by locations. + repeated Function function = 5; + // Lookup table for attributes. + repeated opentelemetry.proto.common.v1.KeyValue attribute_table = 16; + // Represents a mapping between Attribute Keys and Units. + repeated AttributeUnit attribute_units = 17; + // Lookup table for links. + repeated Link link_table = 18; + // A common table for strings referenced by various messages. + // string_table[0] must always be "". + repeated string string_table = 6; + // frames with Function.function_name fully matching the following + // regexp will be dropped from the samples, along with their successors. + int64 drop_frames = 7; // Index into string table. + // frames with Function.function_name fully matching the following + // regexp will be kept, even if it matches drop_frames. + int64 keep_frames = 8; // Index into string table. + + // The following fields are informational, do not affect + // interpretation of results. + + // Time of collection (UTC) represented as nanoseconds past the epoch. + int64 time_nanos = 9; + // Duration of the profile, if a duration makes sense. + int64 duration_nanos = 10; + // The kind of events between sampled occurrences. + // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] + ValueType period_type = 11; + // The number of events between sampled occurrences. + int64 period = 12; + // Free-form text associated with the profile. The text is displayed as is + // to the user by the tools that read profiles (e.g. by pprof). This field + // should not be used to store any machine-readable information, it is only + // for human-friendly content. The profile must stay functional if this field + // is cleaned. + repeated int64 comment = 13; // Indices into string table. + // Index into the string table of the type of the preferred sample + // value. If unset, clients should default to the last sample value. + int64 default_sample_type = 14; +} + +// Represents a mapping between Attribute Keys and Units. +message AttributeUnit { + // Index into string table. 
+ int64 attribute_key = 1; + // Index into string table. + int64 unit = 2; +} + +// A pointer from a profile Sample to a trace Span. +// Connects a profile sample to a trace span, identified by unique trace and span IDs. +message Link { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + bytes trace_id = 1; + + // A unique identifier for the linked span. The ID is an 8-byte array. + bytes span_id = 2; +} + +// Specifies the method of aggregating metric values, either DELTA (change since last report) +// or CUMULATIVE (total since a fixed start time). +enum AggregationTemporality { + /* UNSPECIFIED is the default AggregationTemporality, it MUST not be used. */ + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; + + /** DELTA is an AggregationTemporality for a profiler which reports + changes since last report time. Successive metrics contain aggregation of + values from continuous and non-overlapping intervals. + + The values for a DELTA metric are based only on the time interval + associated with one measurement cycle. There is no dependency on + previous measurements like is the case for CUMULATIVE metrics. + + For example, consider a system measuring the number of requests that + it receives and reports the sum of these requests every second as a + DELTA metric: + + 1. The system starts receiving at time=t_0. + 2. A request is received, the system measures 1 request. + 3. A request is received, the system measures 1 request. + 4. A request is received, the system measures 1 request. + 5. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+1 with a value of 3. + 6. A request is received, the system measures 1 request. + 7. A request is received, the system measures 1 request. + 8. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0+1 to + t_0+2 with a value of 2. */ + AGGREGATION_TEMPORALITY_DELTA = 1; + + /** CUMULATIVE is an AggregationTemporality for a profiler which + reports changes since a fixed start time. This means that current values + of a CUMULATIVE metric depend on all previous measurements since the + start time. Because of this, the sender is required to retain this state + in some form. If this state is lost or invalidated, the CUMULATIVE metric + values MUST be reset and a new fixed start time following the last + reported measurement time sent MUST be used. + + For example, consider a system measuring the number of requests that + it receives and reports the sum of these requests every second as a + CUMULATIVE metric: + + 1. The system starts receiving at time=t_0. + 2. A request is received, the system measures 1 request. + 3. A request is received, the system measures 1 request. + 4. A request is received, the system measures 1 request. + 5. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+1 with a value of 3. + 6. A request is received, the system measures 1 request. + 7. A request is received, the system measures 1 request. + 8. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+2 with a value of 5. + 9. The system experiences a fault and loses state. + 10. The system recovers and resumes receiving at time=t_1. + 11. A request is received, the system measures 1 request. + 12. 
The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_1 to + t_1+1 with a value of 1. + + Note: Even though, when reporting changes since last report time, using + CUMULATIVE is valid, it is not recommended. */ + AGGREGATION_TEMPORALITY_CUMULATIVE = 2; +} + +// ValueType describes the type and units of a value, with an optional aggregation temporality. +message ValueType { + int64 type = 1; // Index into string table. + int64 unit = 2; // Index into string table. + + AggregationTemporality aggregation_temporality = 3; +} + +// Each Sample records values encountered in some program +// context. The program context is typically a stack trace, perhaps +// augmented with auxiliary information like the thread-id, some +// indicator of a higher level request being handled etc. +message Sample { + // The indices recorded here correspond to locations in Profile.location. + // The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length] + repeated uint64 location_index = 1; + // locations_start_index along with locations_length refers to to a slice of locations in Profile.location. + // Supersedes location_index. + uint64 locations_start_index = 7; + // locations_length along with locations_start_index refers to a slice of locations in Profile.location. + // Supersedes location_index. + uint64 locations_length = 8; + // A 128bit id that uniquely identifies this stacktrace, globally. Index into string table. [optional] + uint32 stacktrace_id_index = 9; + // The type and unit of each value is defined by the corresponding + // entry in Profile.sample_type. All samples must have the same + // number of values, the same as the length of Profile.sample_type. + // When aggregating multiple samples into a single sample, the + // result has a list of values that is the element-wise sum of the + // lists of the originals. + repeated int64 value = 2; + // label includes additional context for this sample. It can include + // things like a thread id, allocation size, etc. + // + // NOTE: While possible, having multiple values for the same label key is + // strongly discouraged and should never be used. Most tools (e.g. pprof) do + // not have good (or any) support for multi-value labels. And an even more + // discouraged case is having a string label and a numeric label of the same + // name on a sample. Again, possible to express, but should not be used. + // [deprecated, superseded by attributes] + repeated Label label = 3; + // References to attributes in Profile.attribute_table. [optional] + repeated uint64 attributes = 10; + + // Reference to link in Profile.link_table. [optional] + uint64 link = 12; + + // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected + // to fall within the Profile's time range. [optional] + repeated uint64 timestamps_unix_nano = 13; +} + +// Provides additional context for a sample, +// such as thread ID or allocation size, with optional units. [deprecated] +message Label { + int64 key = 1; // Index into string table + + // At most one of the following must be present + int64 str = 2; // Index into string table + int64 num = 3; + + // Should only be present when num is present. + // Specifies the units of num. + // Use arbitrary string (for example, "requests") as a custom count unit. + // If no unit is specified, consumer may apply heuristic to deduce the unit. 
+ // Consumers may also interpret units like "bytes" and "kilobytes" as memory + // units and units like "seconds" and "nanoseconds" as time units, + // and apply appropriate unit conversions to these. + int64 num_unit = 4; // Index into string table +} + +// Indicates the semantics of the build_id field. +enum BuildIdKind { + // Linker-generated build ID, stored in the ELF binary notes. + BUILD_ID_LINKER = 0; + // Build ID based on the content hash of the binary. Currently no particular + // hashing approach is standardized, so a given producer needs to define it + // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. + // We may choose to provide a standardized stable hash recommendation later. + BUILD_ID_BINARY_HASH = 1; +} + +// Describes the mapping of a binary in memory, including its address range, +// file offset, and metadata like build ID +message Mapping { + // Unique nonzero id for the mapping. [deprecated] + uint64 id = 1; + // Address at which the binary (or DLL) is loaded into memory. + uint64 memory_start = 2; + // The limit of the address range occupied by this mapping. + uint64 memory_limit = 3; + // Offset in the binary that corresponds to the first mapped address. + uint64 file_offset = 4; + // The object this entry is loaded from. This can be a filename on + // disk for the main binary and shared libraries, or virtual + // abstractions like "[vdso]". + int64 filename = 5; // Index into string table + // A string that uniquely identifies a particular program version + // with high probability. E.g., for binaries generated by GNU tools, + // it could be the contents of the .note.gnu.build-id field. + int64 build_id = 6; // Index into string table + // Specifies the kind of build id. See BuildIdKind enum for more details [optional] + BuildIdKind build_id_kind = 11; + // References to attributes in Profile.attribute_table. [optional] + repeated uint64 attributes = 12; + // The following fields indicate the resolution of symbolic info. + bool has_functions = 7; + bool has_filenames = 8; + bool has_line_numbers = 9; + bool has_inline_frames = 10; +} + +// Describes function and line table debug information. +message Location { + // Unique nonzero id for the location. A profile could use + // instruction addresses or any integer sequence as ids. [deprecated] + uint64 id = 1; + // The index of the corresponding profile.Mapping for this location. + // It can be unset if the mapping is unknown or not applicable for + // this profile type. + uint64 mapping_index = 2; + // The instruction address for this location, if available. It + // should be within [Mapping.memory_start...Mapping.memory_limit] + // for the corresponding mapping. A non-leaf address may be in the + // middle of a call instruction. It is up to display tools to find + // the beginning of the instruction if necessary. + uint64 address = 3; + // Multiple line indicates this location has inlined functions, + // where the last entry represents the caller into which the + // preceding entries were inlined. + // + // E.g., if memcpy() is inlined into printf: + // line[0].function_name == "memcpy" + // line[1].function_name == "printf" + repeated Line line = 4; + // Provides an indication that multiple symbols map to this location's + // address, for example due to identical code folding by the linker. In that + // case the line information above represents one of the multiple + // symbols. This field must be recomputed when the symbolization state of the + // profile changes. 
+ bool is_folded = 5; + + // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. + uint32 type_index = 6; + + // References to attributes in Profile.attribute_table. [optional] + repeated uint64 attributes = 7; +} + +// Details a specific line in a source code, linked to a function. +message Line { + // The index of the corresponding profile.Function for this line. + uint64 function_index = 1; + // Line number in source code. + int64 line = 2; + // Column number in source code. + int64 column = 3; +} + +// Describes a function, including its human-readable name, system name, +// source file, and starting line number in the source. +message Function { + // Unique nonzero id for the function. [deprecated] + uint64 id = 1; + // Name of the function, in human-readable form if available. + int64 name = 2; // Index into string table + // Name of the function, as identified by the system. + // For instance, it can be a C++ mangled name. + int64 system_name = 3; // Index into string table + // Source file containing the function. + int64 filename = 4; // Index into string table + // Line number in source file. + int64 start_line = 5; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto b/test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto new file mode 100644 index 00000000000..84b0108f86a --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto @@ -0,0 +1,191 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
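As a quick illustration of the indirection defined above (Location -> Line -> Function -> string_table), the following C# sketch resolves a location to human-readable frame names. It is not part of this change: it assumes the classes protoc generates for pprofextended.proto under the csharp_namespace declared in that file, and the helper name and shape are invented for the example.

// Illustrative sketch only; property names follow standard protoc C# codegen.
using System.Collections.Generic;
using OpenTelemetry.Proto.Profiles.V1Experimental;

internal static class ProfileSymbolizer
{
    // Line.function_index indexes Profile.function, and Function.name /
    // Function.filename are indices into Profile.string_table (entry 0 is "").
    public static IEnumerable<string> FrameNames(Profile profile, Location location)
    {
        // line[0] is the innermost (inlined) frame; the last entry is the caller.
        foreach (var line in location.Line)
        {
            var function = profile.Function[(int)line.FunctionIndex];
            var name = profile.StringTable[(int)function.Name];
            var file = profile.StringTable[(int)function.Filename];
            yield return string.IsNullOrEmpty(file) ? name : $"{name} ({file})";
        }
    }
}
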
+ +syntax = "proto3"; + +package opentelemetry.proto.profiles.v1experimental; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; +import "opentelemetry/proto/profiles/v1experimental/pprofextended.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.profiles.v1experimental"; +option java_outer_classname = "ProfilesProto"; +option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; + +// Relationships Diagram +// +// ┌──────────────────┐ LEGEND +// │ ProfilesData │ +// └──────────────────┘ ─────▶ embedded +// │ +// │ 1-n ─────▷ referenced by index +// ▼ +// ┌──────────────────┐ +// │ ResourceProfiles │ +// └──────────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ ScopeProfiles │ +// └──────────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ ProfileContainer │ +// └──────────────────┘ +// │ +// │ 1-1 +// ▼ +// ┌──────────────────┐ +// │ Profile │ +// └──────────────────┘ +// │ n-1 +// │ 1-n ┌───────────────────────────────────────┐ +// ▼ │ ▽ +// ┌──────────────────┐ 1-n ┌──────────────┐ ┌──────────┐ +// │ Sample │ ──────▷ │ KeyValue │ │ Link │ +// └──────────────────┘ └──────────────┘ └──────────┘ +// │ 1-n △ △ +// │ 1-n ┌─────────────────┘ │ 1-n +// ▽ │ │ +// ┌──────────────────┐ n-1 ┌──────────────┐ +// │ Location │ ──────▷ │ Mapping │ +// └──────────────────┘ └──────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ Line │ +// └──────────────────┘ +// │ +// │ 1-1 +// ▽ +// ┌──────────────────┐ +// │ Function │ +// └──────────────────┘ +// + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message ProfilesData { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceProfiles resource_profiles = 1; +} + + +// A collection of ScopeProfiles from a Resource. +message ResourceProfiles { + reserved 1000; + + // The resource for the profiles in this message. + // If this field is not set then no resource info is known. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of ScopeProfiles that originate from a resource. + repeated ScopeProfiles scope_profiles = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_profiles" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of ProfileContainers produced by an InstrumentationScope. +message ScopeProfiles { + // The instrumentation scope information for the profiles in this message. 
+ // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of ProfileContainers that originate from an instrumentation scope. + repeated ProfileContainer profiles = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the metric data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all profiles in the "profiles" field. + string schema_url = 3; +} + +// A ProfileContainer represents a single profile. It wraps pprof profile with OpenTelemetry specific metadata. +message ProfileContainer { + // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with + // all zeroes is considered invalid. + // + // This field is required. + bytes profile_id = 1; + + // start_time_unix_nano is the start time of the profile. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 start_time_unix_nano = 2; + + // end_time_unix_nano is the end time of the profile. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 end_time_unix_nano = 3; + + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; + + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + uint32 dropped_attributes_count = 5; + + // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] + string original_payload_format = 6; + + // Original payload can be stored in this field. This can be useful for users who want to get the original payload. + // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. + // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. + // If the original payload is in pprof format, it SHOULD not be included in this field. + // The field is optional, however if it is present `profile` MUST be present and contain the same profiling information. + bytes original_payload = 7; + + // This is a reference to a pprof profile. Required, even when original_payload is present. 
+ opentelemetry.proto.profiles.v1experimental.Profile profile = 8; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/resource/v1/resource.proto b/test/Benchmarks/Implementation/opentelemetry/proto/resource/v1/resource.proto new file mode 100644 index 00000000000..6637560bc35 --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/resource/v1/resource.proto @@ -0,0 +1,37 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.resource.v1; + +import "opentelemetry/proto/common/v1/common.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Resource.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.resource.v1"; +option java_outer_classname = "ResourceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/resource/v1"; + +// Resource information. +message Resource { + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. + uint32 dropped_attributes_count = 2; +} diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/trace/v1/trace.proto b/test/Benchmarks/Implementation/opentelemetry/proto/trace/v1/trace.proto new file mode 100644 index 00000000000..5cb2f3ce1cd --- /dev/null +++ b/test/Benchmarks/Implementation/opentelemetry/proto/trace/v1/trace.proto @@ -0,0 +1,355 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.trace.v1; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Trace.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.trace.v1"; +option java_outer_classname = "TraceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/trace/v1"; + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. 
+// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message TracesData { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceSpans resource_spans = 1; +} + +// A collection of ScopeSpans from a Resource. +message ResourceSpans { + reserved 1000; + + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of ScopeSpans that originate from a resource. + repeated ScopeSpans scope_spans = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Spans produced by an InstrumentationScope. +message ScopeSpans { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of Spans that originate from an instrumentation scope. + repeated Span spans = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + string schema_url = 3; +} + +// A Span represents a single operation performed by a single component of the system. +// +// The next available field id is 17. +message Span { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + bytes trace_id = 1; + + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + bytes span_id = 2; + + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + string trace_state = 3; + + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + bytes parent_span_id = 4; + + // Flags, a bit field. 
+ // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + fixed32 flags = 16; + + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + string name = 5; + + // SpanKind is the type of span. Can be used to specify additional relationships between spans + // in addition to a parent/child relationship. + enum SpanKind { + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + SPAN_KIND_UNSPECIFIED = 0; + + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SPAN_KIND_INTERNAL = 1; + + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SPAN_KIND_SERVER = 2; + + // Indicates that the span describes a request to some remote service. + SPAN_KIND_CLIENT = 3; + + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SPAN_KIND_PRODUCER = 4; + + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SPAN_KIND_CONSUMER = 5; + } + + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + SpanKind kind = 6; + + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. 
On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 start_time_unix_nano = 7; + + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 end_time_unix_nano = 8; + + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; + + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + uint32 dropped_attributes_count = 10; + + // Event is a time-stamped annotation of the span, consisting of user-supplied + // text description and key-value pairs. + message Event { + // time_unix_nano is the time the event occurred. + fixed64 time_unix_nano = 1; + + // name of the event. + // This field is semantically required to be set to non-empty string. + string name = 2; + + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 3; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + uint32 dropped_attributes_count = 4; + } + + // events is a collection of Event items. + repeated Event events = 11; + + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + uint32 dropped_events_count = 12; + + // A pointer from the current span to another span in the same trace or in a + // different trace. For example, this can be used in batching operations, + // where a single batch handler processes multiple requests from different + // traces or when the handler receives a request from a different project. + message Link { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + bytes trace_id = 1; + + // A unique identifier for the linked span. The ID is an 8-byte array. + bytes span_id = 2; + + // The trace_state associated with the link. + string trace_state = 3; + + // attributes is a collection of attribute key/value pairs on the link. 
+ // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + uint32 dropped_attributes_count = 5; + + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + fixed32 flags = 6; + } + + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + repeated Link links = 13; + + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + uint32 dropped_links_count = 14; + + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status status = 15; +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +message Status { + reserved 1; + + // A developer-facing human readable error message. + string message = 2; + + // For the semantics of status codes see + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status + enum StatusCode { + // The default status. + STATUS_CODE_UNSET = 0; + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + STATUS_CODE_OK = 1; + // The Span contains an error. + STATUS_CODE_ERROR = 2; + }; + + // The status code. + StatusCode code = 3; +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +enum SpanFlags { + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + SPAN_FLAGS_DO_NOT_USE = 0; + + // Bits 0-7 are used for trace flags. + SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; + + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. 
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100; + SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200; + + // Bits 10-31 are reserved for future use. +} diff --git a/test/Benchmarks/TestTraceServiceClient.cs b/test/Benchmarks/TestTraceServiceClient.cs deleted file mode 100644 index fc60f90278a..00000000000 --- a/test/Benchmarks/TestTraceServiceClient.cs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -extern alias OpenTelemetryProtocol; - -using Grpc.Core; -using OpenTelemetryProtocol::OpenTelemetry.Proto.Collector.Trace.V1; - -namespace Benchmarks; - -internal class TestTraceServiceClient : TraceService.TraceServiceClient -{ - public override ExportTraceServiceResponse Export(ExportTraceServiceRequest request, Metadata? headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default) - { - return new ExportTraceServiceResponse(); - } -} From d7b7797d8b43cd77c8cd07ce36b8d963fe023563 Mon Sep 17 00:00:00 2001 From: Rajkumar Rangaraj Date: Thu, 28 Nov 2024 13:08:14 -0800 Subject: [PATCH 04/10] Test updates --- .../OtlpHttpTraceExportClientTests.cs | 6 +- .../ResourceProtoSerializerTests.cs | 66 -- .../google/rpc/error_details.proto | 37 + .../Implementation/google/rpc/status.proto | 42 ++ .../opentelemetry/proto/collector/README.md | 10 + .../collector/logs/v1/logs_service.proto | 79 ++ .../collector/logs/v1/logs_service_http.yaml | 9 + .../metrics/v1/metrics_service.proto | 79 ++ .../metrics/v1/metrics_service_http.yaml | 9 + .../v1experimental/profiles_service.proto | 78 ++ .../v1experimental/profiles_service_http.yaml | 9 + .../collector/trace/v1/trace_service.proto | 79 ++ .../trace/v1/trace_service_http.yaml | 9 + .../proto/common/v1/common.proto | 81 ++ .../opentelemetry/proto/logs/v1/logs.proto | 211 ++++++ .../proto/metrics/v1/metrics.proto | 691 ++++++++++++++++++ .../v1experimental/pprofextended.proto | 388 ++++++++++ .../profiles/v1experimental/profiles.proto | 191 +++++ .../proto/resource/v1/resource.proto | 37 + .../opentelemetry/proto/trace/v1/trace.proto | 355 +++++++++ .../MockCollectorIntegrationTests.cs | 20 +- ...xporter.OpenTelemetryProtocol.Tests.csproj | 11 + .../OtlpAttributeTests.cs | 21 +- .../OtlpExporterOptionsExtensionsTests.cs | 135 +--- ...tTests.cs => OtlpHttpExportClientTests.cs} | 17 +- .../OtlpLogExporterTests.cs | 29 +- .../OtlpMetricsExporterTests.cs | 270 ++----- .../OtlpResourceTests.cs | 28 +- .../OtlpRetryTests.cs | 119 ++- .../OtlpTraceExporterTests.cs | 4 +- .../TestExportClient.cs | 4 +- .../TestProtobufExportClient.cs | 40 - 32 files changed, 2616 insertions(+), 548 deletions(-) delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/Serializer/ResourceProtoSerializerTests.cs create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/error_details.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/status.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/README.md create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto create mode 100644 
test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/common/v1/common.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/logs/v1/logs.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/metrics/v1/metrics.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/resource/v1/resource.proto create mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/trace/v1/trace.proto rename test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/{BaseOtlpHttpExportClientTests.cs => OtlpHttpExportClientTests.cs} (73%) delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/TestProtobufExportClient.cs diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/ExportClient/OtlpHttpTraceExportClientTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/ExportClient/OtlpHttpTraceExportClientTests.cs index b156204429f..e9a4ba80c76 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/ExportClient/OtlpHttpTraceExportClientTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/ExportClient/OtlpHttpTraceExportClientTests.cs @@ -44,7 +44,7 @@ public void NewOtlpHttpTraceExportClient_OtlpExporterOptions_ExporterHasCorrectP Headers = $"{header1.Name}={header1.Value}, {header2.Name} = {header2.Value}", }; - var client = new ProtobufOtlpHttpExportClient(options, options.HttpClientFactory(), "/v1/traces"); + var client = new OtlpHttpExportClient(options, options.HttpClientFactory(), "/v1/traces"); Assert.NotNull(client.HttpClient); @@ -86,7 +86,7 @@ public void SendExportRequest_ExportTraceServiceRequest_SendsCorrectHttpRequest( var httpClient = new HttpClient(testHttpHandler); - var exportClient = new ProtobufOtlpHttpExportClient(options, httpClient, string.Empty); + var exportClient = new OtlpHttpExportClient(options, httpClient, 
string.Empty); var resourceBuilder = ResourceBuilder.CreateEmpty(); if (includeServiceNameInResource) @@ -159,7 +159,7 @@ void RunTest(Batch batch) // TODO: Revisit once the HttpClient part is overridden. // Assert.IsType(httpRequest.Content); Assert.NotNull(httpRequest.Content); - Assert.Contains(httpRequest.Content.Headers, h => h.Key == "Content-Type" && h.Value.First() == ProtobufOtlpHttpExportClient.MediaHeaderValue.ToString()); + Assert.Contains(httpRequest.Content.Headers, h => h.Key == "Content-Type" && h.Value.First() == OtlpHttpExportClient.MediaHeaderValue.ToString()); var exportTraceRequest = OtlpCollector.ExportTraceServiceRequest.Parser.ParseFrom(testHttpHandler.HttpRequestContent); Assert.NotNull(exportTraceRequest); diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/Serializer/ResourceProtoSerializerTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/Serializer/ResourceProtoSerializerTests.cs deleted file mode 100644 index 0deb4c585a5..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/Serializer/ResourceProtoSerializerTests.cs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer; -using OpenTelemetry.Proto.Trace.V1; -using OpenTelemetry.Resources; -using Xunit; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.Implementation.Serializer; - -public class ResourceProtoSerializerTests -{ - [Fact] - public void CreateResource_SupportedAttributeTypes() - { - // Arrange - byte[] buffer = new byte[1024]; - var attributes = new Dictionary - { - { "string", "stringValue" }, - { "bool", true }, - { "double", 0.1d }, - { "long", 1L }, - - // int and float supported by conversion to long and double - { "int", 1 }, - { "short", (short)1 }, - { "float", 0.1f }, - - // natively supported array types - { "string arr", new string[] { "stringValue1", "stringValue2" } }, - { "bool arr", new bool[] { true } }, - { "double arr", new double[] { 0.1d } }, - { "long arr", new long[] { 1L } }, - - // have to convert to other primitive array types - { "int arr", new int[] { 1, 2, 3 } }, - { "short arr", new short[] { (short)1 } }, - { "float arr", new float[] { 0.1f } }, - }; - - // Act - var resource = ResourceBuilder.CreateEmpty().AddAttributes(attributes).Build(); - var writePosition = ProtobufOtlpResourceSerializer.WriteResource(buffer, 0, resource); - var otlpResource = resource.ToOtlpResource(); - var expectedResourceSpans = new ResourceSpans - { - Resource = otlpResource, - }; - - // Deserialize the ResourceSpans and validate the attributes. 
- ResourceSpans actualResourceSpans; - using (var stream = new MemoryStream(buffer, 0, writePosition)) - { - actualResourceSpans = ResourceSpans.Parser.ParseFrom(stream); - } - - // Assert - Assert.Equal(expectedResourceSpans.Resource.Attributes.Count, actualResourceSpans.Resource.Attributes.Count); - foreach (var actualAttribute in actualResourceSpans.Resource.Attributes) - { - Assert.Contains(actualAttribute, expectedResourceSpans.Resource.Attributes); - } - } -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/error_details.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/error_details.proto new file mode 100644 index 00000000000..0d5461a7287 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/error_details.proto @@ -0,0 +1,37 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/duration.proto"; + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. +message RetryInfo { + // Clients should wait at least this long between retrying the same request. + google.protobuf.Duration retry_delay = 1; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/status.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/status.proto new file mode 100644 index 00000000000..40c5bf318c2 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/status.proto @@ -0,0 +1,42 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
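The RetryInfo guidance above (wait at least retry_delay, then back off exponentially until a retry limit or delay cap is reached) can be sketched as a small pure function. The doubling factor and the cap parameter below are illustrative assumptions, not values taken from the proto.

// Illustration only: derives a retry delay from the server-supplied retry_delay,
// doubling per attempt and clamping to a caller-chosen cap.
using System;

internal static class RetryBackoff
{
    public static TimeSpan DelayForAttempt(TimeSpan retryDelay, int attempt, TimeSpan maxDelay)
    {
        // Attempt 0 waits exactly retry_delay, the minimum RetryInfo asks for.
        double milliseconds = retryDelay.TotalMilliseconds * Math.Pow(2, attempt);
        return milliseconds < maxDelay.TotalMilliseconds
            ? TimeSpan.FromMilliseconds(milliseconds)
            : maxDelay;
    }
}
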
+ +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/any.proto"; + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +message Status { + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + repeated google.protobuf.Any details = 3; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/README.md b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/README.md new file mode 100644 index 00000000000..f82dbb0278b --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/README.md @@ -0,0 +1,10 @@ +# OpenTelemetry Collector Proto + +This package describes the OpenTelemetry collector protocol. + +## Packages + +1. `common` package contains the common messages shared between different services. +2. `trace` package contains the Trace Service protos. +3. `metrics` package contains the Metrics Service protos. +4. `logs` package contains the Logs Service protos. diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto new file mode 100644 index 00000000000..8260d8aaeb8 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto @@ -0,0 +1,79 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
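When a server attaches rich error details, RetryInfo arrives inside Status.details as a packed Any. The hedged C# sketch below shows one way to locate it; it assumes the Google.Rpc types generated from the two files above and the Any helpers in Google.Protobuf. The retry_delay it carries is a well-known Duration, which converts to a TimeSpan via ToTimeSpan() and could then seed a backoff such as the earlier sketch.

// Illustration only: scans google.rpc.Status.details for a RetryInfo entry.
using Google.Protobuf.WellKnownTypes;
using Google.Rpc;

internal static class StatusDetails
{
    public static RetryInfo? FindRetryInfo(Status status)
    {
        foreach (Any detail in status.Details)
        {
            if (detail.Is(RetryInfo.Descriptor))
            {
                return detail.Unpack<RetryInfo>();
            }
        }

        return null; // no retry hint attached to this Status
    }
}
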
+ +syntax = "proto3"; + +package opentelemetry.proto.collector.logs.v1; + +import "opentelemetry/proto/logs/v1/logs.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Logs.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.logs.v1"; +option java_outer_classname = "LogsServiceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/logs/v1"; + +// Service that can be used to push logs between one Application instrumented with +// OpenTelemetry and an collector, or between an collector and a central collector (in this +// case logs are sent/received to/from multiple Applications). +service LogsService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportLogsServiceRequest) returns (ExportLogsServiceResponse) {} +} + +message ExportLogsServiceRequest { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated opentelemetry.proto.logs.v1.ResourceLogs resource_logs = 1; +} + +message ExportLogsServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportLogsPartialSuccess partial_success = 1; +} + +message ExportLogsPartialSuccess { + // The number of rejected log records. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_log_records = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. 
+ string error_message = 2; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml new file mode 100644 index 00000000000..507473b9b3b --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.logs.v1.LogsService.Export + post: /v1/logs + body: "*" \ No newline at end of file diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto new file mode 100644 index 00000000000..dd48f1ad3a1 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto @@ -0,0 +1,79 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.collector.metrics.v1; + +import "opentelemetry/proto/metrics/v1/metrics.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Metrics.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.metrics.v1"; +option java_outer_classname = "MetricsServiceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/metrics/v1"; + +// Service that can be used to push metrics between one Application +// instrumented with OpenTelemetry and a collector, or between a collector and a +// central collector. +service MetricsService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportMetricsServiceRequest) returns (ExportMetricsServiceResponse) {} +} + +message ExportMetricsServiceRequest { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated opentelemetry.proto.metrics.v1.ResourceMetrics resource_metrics = 1; +} + +message ExportMetricsServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. 
when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportMetricsPartialSuccess partial_success = 1; +} + +message ExportMetricsPartialSuccess { + // The number of rejected data points. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_data_points = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + string error_message = 2; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml new file mode 100644 index 00000000000..a5456502607 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.metrics.v1.MetricsService.Export + post: /v1/metrics + body: "*" \ No newline at end of file diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto new file mode 100644 index 00000000000..d0e7894b29c --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto @@ -0,0 +1,78 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
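For illustration, the partial-success contract spelled out in the Export*ServiceResponse comments above amounts to a small client-side check. The sketch below is not something this patch adds; it assumes the property names (PartialSuccess, RejectedLogRecords, ErrorMessage) produced by the standard Google.Protobuf C# code generation for logs_service.proto.

// Sketch only: acting on ExportLogsServiceResponse.partial_success as described above.
using System;
using OpenTelemetry.Proto.Collector.Logs.V1;

internal static class PartialSuccessCheck
{
    public static void Inspect(ExportLogsServiceResponse response)
    {
        var partial = response.PartialSuccess;

        // Absent or empty partial_success (rejected_log_records == 0, error_message == "")
        // is equivalent to full success.
        if (partial == null || (partial.RejectedLogRecords == 0 && string.IsNullOrEmpty(partial.ErrorMessage)))
        {
            return;
        }

        if (partial.RejectedLogRecords > 0)
        {
            // Partial success: some records were rejected by the server; error_message explains why.
            Console.WriteLine($"{partial.RejectedLogRecords} log records rejected: {partial.ErrorMessage}");
        }
        else
        {
            // rejected_log_records == 0 with a non-empty message is a warning/suggestion.
            Console.WriteLine($"Collector warning: {partial.ErrorMessage}");
        }
    }
}

The same shape applies to the metrics and trace responses, with RejectedDataPoints or RejectedSpans in place of RejectedLogRecords.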
+ +syntax = "proto3"; + +package opentelemetry.proto.collector.profiles.v1experimental; + +import "opentelemetry/proto/profiles/v1experimental/profiles.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.profiles.v1experimental"; +option java_outer_classname = "ProfilesServiceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental"; + +// Service that can be used to push profiles between one Application instrumented with +// OpenTelemetry and a collector, or between a collector and a central collector. +service ProfilesService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportProfilesServiceRequest) returns (ExportProfilesServiceResponse) {} +} + +message ExportProfilesServiceRequest { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated opentelemetry.proto.profiles.v1experimental.ResourceProfiles resource_profiles = 1; +} + +message ExportProfilesServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportProfilesPartialSuccess partial_success = 1; +} + +message ExportProfilesPartialSuccess { + // The number of rejected profiles. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_profiles = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. 
+ string error_message = 2; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml new file mode 100644 index 00000000000..ea501afbbb2 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.profiles.v1experimental.ProfilesService.Export + post: /v1experimental/profiles + body: "*" diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto new file mode 100644 index 00000000000..d6fe67f9e55 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto @@ -0,0 +1,79 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.collector.trace.v1; + +import "opentelemetry/proto/trace/v1/trace.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Trace.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.trace.v1"; +option java_outer_classname = "TraceServiceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/trace/v1"; + +// Service that can be used to push spans between one Application instrumented with +// OpenTelemetry and a collector, or between a collector and a central collector (in this +// case spans are sent/received to/from multiple Applications). +service TraceService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportTraceServiceRequest) returns (ExportTraceServiceResponse) {} +} + +message ExportTraceServiceRequest { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated opentelemetry.proto.trace.v1.ResourceSpans resource_spans = 1; +} + +message ExportTraceServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. 
when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportTracePartialSuccess partial_success = 1; +} + +message ExportTracePartialSuccess { + // The number of rejected spans. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_spans = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + string error_message = 2; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml new file mode 100644 index 00000000000..d091b3a8d53 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml @@ -0,0 +1,9 @@ +# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the +# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. +type: google.api.Service +config_version: 3 +http: + rules: + - selector: opentelemetry.proto.collector.trace.v1.TraceService.Export + post: /v1/traces + body: "*" diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/common/v1/common.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/common/v1/common.proto new file mode 100644 index 00000000000..ff8a21a1fa0 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/common/v1/common.proto @@ -0,0 +1,81 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
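The *_http.yaml files above only pin down the HTTP route for each Export RPC: a POST of the serialized request message to /v1/logs, /v1/metrics, or /v1/traces. As a rough sketch (again not part of the patch, using a placeholder endpoint on the conventional 4318 OTLP/HTTP port), a binary-protobuf export over that route could look like this:

// Sketch only: POSTing a serialized request to the /v1/traces route from trace_service_http.yaml.
using System.Net.Http;
using System.Net.Http.Headers;
using System.Threading.Tasks;
using Google.Protobuf;
using OpenTelemetry.Proto.Collector.Trace.V1;

internal static class OtlpHttpPost
{
    public static async Task ExportAsync(HttpClient client, ExportTraceServiceRequest request)
    {
        // http://localhost:4318 is a placeholder; 4318 is the conventional OTLP/HTTP port.
        var content = new ByteArrayContent(request.ToByteArray());
        content.Headers.ContentType = new MediaTypeHeaderValue("application/x-protobuf");

        using var response = await client.PostAsync("http://localhost:4318/v1/traces", content);
        response.EnsureSuccessStatusCode();
    }
}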
+ +syntax = "proto3"; + +package opentelemetry.proto.common.v1; + +option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.common.v1"; +option java_outer_classname = "CommonProto"; +option go_package = "go.opentelemetry.io/proto/otlp/common/v1"; + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +message AnyValue { + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + oneof value { + string string_value = 1; + bool bool_value = 2; + int64 int_value = 3; + double double_value = 4; + ArrayValue array_value = 5; + KeyValueList kvlist_value = 6; + bytes bytes_value = 7; + } +} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +message ArrayValue { + // Array of values. The array may be empty (contain 0 elements). + repeated AnyValue values = 1; +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +message KeyValueList { + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + repeated KeyValue values = 1; +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. +message KeyValue { + string key = 1; + AnyValue value = 2; +} + +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +message InstrumentationScope { + // An empty instrumentation scope name means the name is unknown. + string name = 1; + string version = 2; + + // Additional attributes that describe the scope. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated KeyValue attributes = 3; + uint32 dropped_attributes_count = 4; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/logs/v1/logs.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/logs/v1/logs.proto new file mode 100644 index 00000000000..f9b97dd7451 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/logs/v1/logs.proto @@ -0,0 +1,211 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.logs.v1; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Logs.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.logs.v1"; +option java_outer_classname = "LogsProto"; +option go_package = "go.opentelemetry.io/proto/otlp/logs/v1"; + +// LogsData represents the logs data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP logs data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message LogsData { + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceLogs resource_logs = 1; +} + +// A collection of ScopeLogs from a Resource. +message ResourceLogs { + reserved 1000; + + // The resource for the logs in this message. + // If this field is not set then resource info is unknown. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of ScopeLogs that originate from a resource. + repeated ScopeLogs scope_logs = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_logs" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Logs produced by a Scope. +message ScopeLogs { + // The instrumentation scope information for the logs in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of log records. + repeated LogRecord log_records = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the log data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all logs in the "logs" field. + string schema_url = 3; +} + +// Possible values for LogRecord.SeverityNumber. +enum SeverityNumber { + // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. 
+ SEVERITY_NUMBER_UNSPECIFIED = 0; + SEVERITY_NUMBER_TRACE = 1; + SEVERITY_NUMBER_TRACE2 = 2; + SEVERITY_NUMBER_TRACE3 = 3; + SEVERITY_NUMBER_TRACE4 = 4; + SEVERITY_NUMBER_DEBUG = 5; + SEVERITY_NUMBER_DEBUG2 = 6; + SEVERITY_NUMBER_DEBUG3 = 7; + SEVERITY_NUMBER_DEBUG4 = 8; + SEVERITY_NUMBER_INFO = 9; + SEVERITY_NUMBER_INFO2 = 10; + SEVERITY_NUMBER_INFO3 = 11; + SEVERITY_NUMBER_INFO4 = 12; + SEVERITY_NUMBER_WARN = 13; + SEVERITY_NUMBER_WARN2 = 14; + SEVERITY_NUMBER_WARN3 = 15; + SEVERITY_NUMBER_WARN4 = 16; + SEVERITY_NUMBER_ERROR = 17; + SEVERITY_NUMBER_ERROR2 = 18; + SEVERITY_NUMBER_ERROR3 = 19; + SEVERITY_NUMBER_ERROR4 = 20; + SEVERITY_NUMBER_FATAL = 21; + SEVERITY_NUMBER_FATAL2 = 22; + SEVERITY_NUMBER_FATAL3 = 23; + SEVERITY_NUMBER_FATAL4 = 24; +} + +// LogRecordFlags represents constants used to interpret the +// LogRecord.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) +// +enum LogRecordFlags { + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + LOG_RECORD_FLAGS_DO_NOT_USE = 0; + + // Bits 0-7 are used for trace flags. + LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; + + // Bits 8-31 are reserved for future use. +} + +// A log record according to OpenTelemetry Log Data Model: +// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md +message LogRecord { + reserved 4; + + // time_unix_nano is the time when the event occurred. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + fixed64 time_unix_nano = 1; + + // Time when the event was observed by the collection system. + // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) + // this timestamp is typically set at the generation time and is equal to Timestamp. + // For events originating externally and collected by OpenTelemetry (e.g. using + // Collector) this is the time when OpenTelemetry's code observed the event measured + // by the clock of the OpenTelemetry code. This field MUST be set once the event is + // observed by OpenTelemetry. + // + // For converting OpenTelemetry log data to formats that support only one timestamp or + // when receiving OpenTelemetry log data by recipients that support only one timestamp + // internally the following logic is recommended: + // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + fixed64 observed_time_unix_nano = 11; + + // Numerical value of the severity, normalized to values described in Log Data Model. + // [Optional]. + SeverityNumber severity_number = 2; + + // The severity text (also known as log level). The original string representation as + // it is known at the source. [Optional]. + string severity_text = 3; + + // A value containing the body of the log record. Can be for example a human-readable + // string message (including multi-line) describing the event in a free form or it can + // be a structured data composed of arrays and maps of other values. [Optional]. 
+ opentelemetry.proto.common.v1.AnyValue body = 5; + + // Additional attributes that describe the specific event occurrence. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 6; + uint32 dropped_attributes_count = 7; + + // Flags, a bit field. 8 least significant bits are the trace flags as + // defined in W3C Trace Context specification. 24 most significant bits are reserved + // and must be set to 0. Readers must not assume that 24 most significant bits + // will be zero and must correctly mask the bits when reading 8-bit trace flag (use + // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. + fixed32 flags = 8; + + // A unique identifier for a trace. All logs from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is optional. + // + // The receivers SHOULD assume that the log record is not associated with a + // trace if any of the following is true: + // - the field is not present, + // - the field contains an invalid value. + bytes trace_id = 9; + + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is optional. If the sender specifies a valid span_id then it SHOULD also + // specify a valid trace_id. + // + // The receivers SHOULD assume that the log record is not associated with a + // span if any of the following is true: + // - the field is not present, + // - the field contains an invalid value. + bytes span_id = 10; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/metrics/v1/metrics.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/metrics/v1/metrics.proto new file mode 100644 index 00000000000..19bb7ff8d53 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/metrics/v1/metrics.proto @@ -0,0 +1,691 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
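The LogRecord.flags field defined in logs.proto above carries the W3C trace flags in its 8 least-significant bits, and readers are told to mask before using them. A minimal sketch of that masking (not part of the patch; it uses the literal mask value rather than the generated LogRecordFlags enum):

// Sketch only: masking LogRecord.flags down to the W3C trace flags, per the logs.proto comment.
internal static class LogRecordFlagsHelper
{
    private const uint TraceFlagsMask = 0x000000FF; // LOG_RECORD_FLAGS_TRACE_FLAGS_MASK

    public static byte GetTraceFlags(uint logRecordFlags)
    {
        // The 24 most-significant bits are reserved and must not be assumed to be zero.
        return (byte)(logRecordFlags & TraceFlagsMask);
    }

    public static bool IsSampled(uint logRecordFlags)
    {
        // Bit 0 of the W3C trace flags is the "sampled" flag.
        return (GetTraceFlags(logRecordFlags) & 0x01) != 0;
    }
}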
+ +syntax = "proto3"; + +package opentelemetry.proto.metrics.v1; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.metrics.v1"; +option java_outer_classname = "MetricsProto"; +option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1"; + +// MetricsData represents the metrics data that can be stored in a persistent +// storage, OR can be embedded by other protocols that transfer OTLP metrics +// data but do not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message MetricsData { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceMetrics resource_metrics = 1; +} + +// A collection of ScopeMetrics from a Resource. +message ResourceMetrics { + reserved 1000; + + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of metrics that originate from a resource. + repeated ScopeMetrics scope_metrics = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_metrics" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Metrics produced by an Scope. +message ScopeMetrics { + // The instrumentation scope information for the metrics in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of metrics that originate from an instrumentation library. + repeated Metric metrics = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the metric data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all metrics in the "metrics" field. + string schema_url = 3; +} + +// Defines a Metric which has one or more timeseries. The following is a +// brief summary of the Metric data model. For more details, see: +// +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md +// +// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of a metadata and data. +// - Metadata part contains a name, description, unit. 
+// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). +// - DataPoint contains timestamps, attributes, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +------------------------------------+ +// |data |---> |Gauge, Sum, Histogram, Summary, ... | +// +------------+ +------------------------------------+ +// +// Data [One of Gauge, Sum, Histogram, Summary, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// Each distinct type of DataPoint represents the output of a specific +// aggregation function, the result of applying the DataPoint's +// associated function of to one or more measurements. +// +// All DataPoint types have three common fields: +// - Attributes includes key-value pairs associated with the data point +// - TimeUnixNano is required, set to the end time of the aggregation +// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints +// having an AggregationTemporality field, as discussed below. +// +// Both TimeUnixNano and StartTimeUnixNano values are expressed as +// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. +// +// # TimeUnixNano +// +// This field is required, having consistent interpretation across +// DataPoint types. TimeUnixNano is the moment corresponding to when +// the data point's aggregate value was captured. +// +// Data points with the 0 value for TimeUnixNano SHOULD be rejected +// by consumers. +// +// # StartTimeUnixNano +// +// StartTimeUnixNano in general allows detecting when a sequence of +// observations is unbroken. This field indicates to consumers the +// start time for points with cumulative and delta +// AggregationTemporality, and it should be included whenever possible +// to support correct rate calculation. Although it may be omitted +// when the start time is truly unknown, setting StartTimeUnixNano is +// strongly encouraged. +message Metric { + reserved 4, 6, 8; + + // name of the metric. + string name = 1; + + // description of the metric, which can be used in documentation. + string description = 2; + + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + string unit = 3; + + // Data determines the aggregation type (if any) of the metric, what is the + // reported value type for the data points, as well as the relatationship to + // the time interval over which they are reported. + oneof data { + Gauge gauge = 5; + Sum sum = 7; + Histogram histogram = 9; + ExponentialHistogram exponential_histogram = 10; + Summary summary = 11; + } + + // Additional metadata attributes that describe the metric. [Optional]. + // Attributes are non-identifying. + // Consumers SHOULD NOT need to be aware of these attributes. 
+ // These attributes MAY be used to encode information allowing + // for lossless roundtrip translation to / from another data model. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue metadata = 12; +} + +// Gauge represents the type of a scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +message Gauge { + repeated NumberDataPoint data_points = 1; +} + +// Sum represents the type of a scalar metric that is calculated as a sum of all +// reported measurements over a time interval. +message Sum { + repeated NumberDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; + + // If "true" means that the sum is monotonic. + bool is_monotonic = 3; +} + +// Histogram represents the type of a metric that is calculated by aggregating +// as a Histogram of all reported measurements over a time interval. +message Histogram { + repeated HistogramDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; +} + +// ExponentialHistogram represents the type of a metric that is calculated by aggregating +// as a ExponentialHistogram of all reported double measurements over a time interval. +message ExponentialHistogram { + repeated ExponentialHistogramDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) +// data type. These data points cannot always be merged in a meaningful way. +// While they can be useful in some applications, histogram data points are +// recommended for new applications. +message Summary { + repeated SummaryDataPoint data_points = 1; +} + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values. It describes how those values relate to the time interval over +// which they are aggregated. +enum AggregationTemporality { + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; + + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. 
There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. + AGGREGATION_TEMPORALITY_DELTA = 1; + + // CUMULATIVE is an AggregationTemporality for a metric aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_0+1 with a value of 1. + // + // Note: Even though, when reporting changes since last report time, using + // CUMULATIVE is valid, it is not recommended. This may cause problems for + // systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). + AGGREGATION_TEMPORALITY_CUMULATIVE = 2; +} + +// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a +// bit-field representing 32 distinct boolean flags. Each flag defined in this +// enum is a bit-mask. To test the presence of a single flag in the flags of +// a data point, for example, use an expression like: +// +// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK +// +enum DataPointFlags { + // The zero value for the enum. 
Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + DATA_POINT_FLAGS_DO_NOT_USE = 0; + + // This DataPoint is valid but has no recorded value. This value + // SHOULD be used to reflect explicitly missing data in a series, as + // for an equivalent to the Prometheus "staleness marker". + DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1; + + // Bits 2-31 are reserved for future use. +} + +// NumberDataPoint is a single data point in a timeseries that describes the +// time-varying scalar value of a metric. +message NumberDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // The value itself. A point is considered invalid when one of the recognized + // value fields is not present inside this oneof. + oneof value { + double as_double = 4; + sfixed64 as_int = 6; + } + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 5; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 8; +} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram. A Histogram contains summary statistics +// for a population of values, it may optionally contain the distribution of +// those values across a set of buckets. +// +// If the histogram contains the distribution of values, then both +// "explicit_bounds" and "bucket counts" fields must be defined. +// If the histogram does not contain the distribution of values, then both +// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and +// "sum" are known. +message HistogramDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be non-negative. This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + fixed64 count = 4; + + // sum of the values in the population. 
If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + optional double sum = 5; + + // bucket_counts is an optional field contains the count values of histogram + // for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be by one greater than + // the number of elements in explicit_bounds array. + repeated fixed64 bucket_counts = 6; + + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // + // The boundaries for bucket at index i are: + // + // (-infinity, explicit_bounds[i]] for i == 0 + // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) + // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) + // + // The values in the explicit_bounds array must be strictly increasing. + // + // Histogram buckets are inclusive of their upper boundary, except the last + // bucket where the boundary is at infinity. This format is intentionally + // compatible with the OpenMetrics histogram definition. + repeated double explicit_bounds = 7; + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 8; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 10; + + // min is the minimum value over (start_time, end_time]. + optional double min = 11; + + // max is the maximum value over (start_time, end_time]. + optional double max = 12; +} + +// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +// +message ExponentialHistogramDataPoint { + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be + // non-negative. This value must be equal to the sum of the "bucket_counts" + // values in the positive and negative Buckets plus the "zero_count" field. + fixed64 count = 4; + + // sum of the values in the population. If count is zero then this field + // must be zero. 
+ // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + optional double sum = 5; + + // scale describes the resolution of the histogram. Boundaries are + // located at powers of the base, where: + // + // base = (2^(2^-scale)) + // + // The histogram bucket identified by `index`, a signed integer, + // contains values that are greater than (base^index) and + // less than or equal to (base^(index+1)). + // + // The positive and negative ranges of the histogram are expressed + // separately. Negative values are mapped by their absolute value + // into the negative range using the same scale as the positive range. + // + // scale is not restricted by the protocol, as the permissible + // values depend on the range of the data. + sint32 scale = 6; + + // zero_count is the count of values that are either exactly zero or + // within the region considered zero by the instrumentation at the + // tolerated degree of precision. This bucket stores values that + // cannot be expressed using the standard exponential formula as + // well as values that have been rounded to zero. + // + // Implementations MAY consider the zero bucket to have probability + // mass equal to (zero_count / count). + fixed64 zero_count = 7; + + // positive carries the positive range of exponential bucket counts. + Buckets positive = 8; + + // negative carries the negative range of exponential bucket counts. + Buckets negative = 9; + + // Buckets are a set of bucket counts, encoded in a contiguous array + // of counts. + message Buckets { + // Offset is the bucket index of the first entry in the bucket_counts array. + // + // Note: This uses a varint encoding as a simple form of compression. + sint32 offset = 1; + + // bucket_counts is an array of count values, where bucket_counts[i] carries + // the count of the bucket at index (offset+i). bucket_counts[i] is the count + // of values greater than base^(offset+i) and less than or equal to + // base^(offset+i+1). + // + // Note: By contrast, the explicit HistogramDataPoint uses + // fixed64. This field is expected to have many buckets, + // especially zeros, so uint64 has been selected to ensure + // varint encoding. + repeated uint64 bucket_counts = 2; + } + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 10; + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 11; + + // min is the minimum value over (start_time, end_time]. + optional double min = 12; + + // max is the maximum value over (start_time, end_time]. + optional double max = 13; + + // ZeroThreshold may be optionally set to convey the width of the zero + // region. Where the zero region is defined as the closed interval + // [-ZeroThreshold, ZeroThreshold]. + // When ZeroThreshold is 0, zero count bucket stores values that cannot be + // expressed using the standard exponential formula as well as values that + // have been rounded to zero. 
+ double zero_threshold = 14; +} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +message SummaryDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be non-negative. + fixed64 count = 4; + + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary + double sum = 5; + + // Represents the value at a given quantile of a distribution. + // + // To record Min and Max values following conventions are used: + // - The 1.0 quantile is equivalent to the maximum value observed. + // - The 0.0 quantile is equivalent to the minimum value observed. + // + // See the following issue for more context: + // https://github.com/open-telemetry/opentelemetry-proto/issues/125 + message ValueAtQuantile { + // The quantile of a distribution. Must be in the interval + // [0.0, 1.0]. + double quantile = 1; + + // The value at the given quantile of a distribution. + // + // Quantile values must NOT be negative. + double value = 2; + } + + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + repeated ValueAtQuantile quantile_values = 6; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 8; +} + +// A representation of an exemplar, which is a sample input measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +message Exemplar { + reserved 1; + + // The set of key/value pairs that were filtered out by the aggregator, but + // recorded alongside the original measurement. Only key/value pairs that were + // filtered out by the aggregator should be included + repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7; + + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 2; + + // The value of the measurement that was recorded. 
An exemplar is + // considered invalid when one of the recognized value fields is not present + // inside this oneof. + oneof value { + double as_double = 3; + sfixed64 as_int = 6; + } + + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + bytes span_id = 4; + + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + bytes trace_id = 5; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto new file mode 100644 index 00000000000..b5b5b88fcef --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto @@ -0,0 +1,388 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file includes work covered by the following copyright and permission notices: +// +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Profile is a common stacktrace profile format. +// +// Measurements represented with this format should follow the +// following conventions: +// +// - Consumers should treat unset optional fields as if they had been +// set with their default value. +// +// - When possible, measurements should be stored in "unsampled" form +// that is most useful to humans. There should be enough +// information present to determine the original sampled values. +// +// - On-disk, the serialized proto must be gzip-compressed. +// +// - The profile is represented as a set of samples, where each sample +// references a sequence of locations, and where each location belongs +// to a mapping. +// - There is a N->1 relationship from sample.location_id entries to +// locations. For every sample.location_id entry there must be a +// unique Location with that index. +// - There is an optional N->1 relationship from locations to +// mappings. For every nonzero Location.mapping_id there must be a +// unique Mapping with that index. 
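The ExponentialHistogramDataPoint.scale comment in metrics.proto above fixes the bucket boundaries via base = 2^(2^-scale), with bucket `index` holding values in (base^index, base^(index+1)]. A short worked sketch of that arithmetic (not part of the patch):

// Sketch only: the exponential-histogram bucket math described in metrics.proto above.
using System;

internal static class ExponentialBuckets
{
    // base = 2^(2^-scale)
    public static double Base(int scale) => Math.Pow(2.0, Math.Pow(2.0, -scale));

    // Bucket `index` (i.e. offset + i for the i-th entry of bucket_counts) covers values
    // greater than base^index and less than or equal to base^(index + 1).
    public static (double LowerExclusive, double UpperInclusive) Bounds(int scale, int index)
    {
        double b = Base(scale);
        return (Math.Pow(b, index), Math.Pow(b, index + 1));
    }
}

For example, scale = 3 gives base = 2^(1/8), about 1.0905, so bucket 0 covers (1, 1.0905] and bucket 1 covers (1.0905, 1.1892].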
+ +syntax = "proto3"; + +package opentelemetry.proto.profiles.v1experimental; + +import "opentelemetry/proto/common/v1/common.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.profiles.v1experimental"; +option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; + +// Represents a complete profile, including sample types, samples, +// mappings to binaries, locations, functions, string table, and additional metadata. +message Profile { + // A description of the samples associated with each Sample.value. + // For a cpu profile this might be: + // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] + // For a heap profile, this might be: + // [["allocations","count"], ["space","bytes"]], + // If one of the values represents the number of events represented + // by the sample, by convention it should be at index 0 and use + // sample_type.unit == "count". + repeated ValueType sample_type = 1; + // The set of samples recorded in this profile. + repeated Sample sample = 2; + // Mapping from address ranges to the image/binary/library mapped + // into that address range. mapping[0] will be the main binary. + repeated Mapping mapping = 3; + // Locations referenced by samples via location_indices. + repeated Location location = 4; + // Array of locations referenced by samples. + repeated int64 location_indices = 15; + // Functions referenced by locations. + repeated Function function = 5; + // Lookup table for attributes. + repeated opentelemetry.proto.common.v1.KeyValue attribute_table = 16; + // Represents a mapping between Attribute Keys and Units. + repeated AttributeUnit attribute_units = 17; + // Lookup table for links. + repeated Link link_table = 18; + // A common table for strings referenced by various messages. + // string_table[0] must always be "". + repeated string string_table = 6; + // frames with Function.function_name fully matching the following + // regexp will be dropped from the samples, along with their successors. + int64 drop_frames = 7; // Index into string table. + // frames with Function.function_name fully matching the following + // regexp will be kept, even if it matches drop_frames. + int64 keep_frames = 8; // Index into string table. + + // The following fields are informational, do not affect + // interpretation of results. + + // Time of collection (UTC) represented as nanoseconds past the epoch. + int64 time_nanos = 9; + // Duration of the profile, if a duration makes sense. + int64 duration_nanos = 10; + // The kind of events between sampled occurrences. + // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] + ValueType period_type = 11; + // The number of events between sampled occurrences. + int64 period = 12; + // Free-form text associated with the profile. The text is displayed as is + // to the user by the tools that read profiles (e.g. by pprof). This field + // should not be used to store any machine-readable information, it is only + // for human-friendly content. The profile must stay functional if this field + // is cleaned. + repeated int64 comment = 13; // Indices into string table. + // Index into the string table of the type of the preferred sample + // value. If unset, clients should default to the last sample value. + int64 default_sample_type = 14; +} + +// Represents a mapping between Attribute Keys and Units. +message AttributeUnit { + // Index into string table. 
+ int64 attribute_key = 1; + // Index into string table. + int64 unit = 2; +} + +// A pointer from a profile Sample to a trace Span. +// Connects a profile sample to a trace span, identified by unique trace and span IDs. +message Link { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + bytes trace_id = 1; + + // A unique identifier for the linked span. The ID is an 8-byte array. + bytes span_id = 2; +} + +// Specifies the method of aggregating metric values, either DELTA (change since last report) +// or CUMULATIVE (total since a fixed start time). +enum AggregationTemporality { + /* UNSPECIFIED is the default AggregationTemporality, it MUST not be used. */ + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; + + /** DELTA is an AggregationTemporality for a profiler which reports + changes since last report time. Successive metrics contain aggregation of + values from continuous and non-overlapping intervals. + + The values for a DELTA metric are based only on the time interval + associated with one measurement cycle. There is no dependency on + previous measurements like is the case for CUMULATIVE metrics. + + For example, consider a system measuring the number of requests that + it receives and reports the sum of these requests every second as a + DELTA metric: + + 1. The system starts receiving at time=t_0. + 2. A request is received, the system measures 1 request. + 3. A request is received, the system measures 1 request. + 4. A request is received, the system measures 1 request. + 5. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+1 with a value of 3. + 6. A request is received, the system measures 1 request. + 7. A request is received, the system measures 1 request. + 8. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0+1 to + t_0+2 with a value of 2. */ + AGGREGATION_TEMPORALITY_DELTA = 1; + + /** CUMULATIVE is an AggregationTemporality for a profiler which + reports changes since a fixed start time. This means that current values + of a CUMULATIVE metric depend on all previous measurements since the + start time. Because of this, the sender is required to retain this state + in some form. If this state is lost or invalidated, the CUMULATIVE metric + values MUST be reset and a new fixed start time following the last + reported measurement time sent MUST be used. + + For example, consider a system measuring the number of requests that + it receives and reports the sum of these requests every second as a + CUMULATIVE metric: + + 1. The system starts receiving at time=t_0. + 2. A request is received, the system measures 1 request. + 3. A request is received, the system measures 1 request. + 4. A request is received, the system measures 1 request. + 5. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+1 with a value of 3. + 6. A request is received, the system measures 1 request. + 7. A request is received, the system measures 1 request. + 8. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+2 with a value of 5. + 9. The system experiences a fault and loses state. + 10. The system recovers and resumes receiving at time=t_1. + 11. A request is received, the system measures 1 request. + 12. 
The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_1 to + t_1+1 with a value of 1. + + Note: Even though, when reporting changes since last report time, using + CUMULATIVE is valid, it is not recommended. */ + AGGREGATION_TEMPORALITY_CUMULATIVE = 2; +} + +// ValueType describes the type and units of a value, with an optional aggregation temporality. +message ValueType { + int64 type = 1; // Index into string table. + int64 unit = 2; // Index into string table. + + AggregationTemporality aggregation_temporality = 3; +} + +// Each Sample records values encountered in some program +// context. The program context is typically a stack trace, perhaps +// augmented with auxiliary information like the thread-id, some +// indicator of a higher level request being handled etc. +message Sample { + // The indices recorded here correspond to locations in Profile.location. + // The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length] + repeated uint64 location_index = 1; + // locations_start_index along with locations_length refers to to a slice of locations in Profile.location. + // Supersedes location_index. + uint64 locations_start_index = 7; + // locations_length along with locations_start_index refers to a slice of locations in Profile.location. + // Supersedes location_index. + uint64 locations_length = 8; + // A 128bit id that uniquely identifies this stacktrace, globally. Index into string table. [optional] + uint32 stacktrace_id_index = 9; + // The type and unit of each value is defined by the corresponding + // entry in Profile.sample_type. All samples must have the same + // number of values, the same as the length of Profile.sample_type. + // When aggregating multiple samples into a single sample, the + // result has a list of values that is the element-wise sum of the + // lists of the originals. + repeated int64 value = 2; + // label includes additional context for this sample. It can include + // things like a thread id, allocation size, etc. + // + // NOTE: While possible, having multiple values for the same label key is + // strongly discouraged and should never be used. Most tools (e.g. pprof) do + // not have good (or any) support for multi-value labels. And an even more + // discouraged case is having a string label and a numeric label of the same + // name on a sample. Again, possible to express, but should not be used. + // [deprecated, superseded by attributes] + repeated Label label = 3; + // References to attributes in Profile.attribute_table. [optional] + repeated uint64 attributes = 10; + + // Reference to link in Profile.link_table. [optional] + uint64 link = 12; + + // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected + // to fall within the Profile's time range. [optional] + repeated uint64 timestamps_unix_nano = 13; +} + +// Provides additional context for a sample, +// such as thread ID or allocation size, with optional units. [deprecated] +message Label { + int64 key = 1; // Index into string table + + // At most one of the following must be present + int64 str = 2; // Index into string table + int64 num = 3; + + // Should only be present when num is present. + // Specifies the units of num. + // Use arbitrary string (for example, "requests") as a custom count unit. + // If no unit is specified, consumer may apply heuristic to deduce the unit. 
+ // Consumers may also interpret units like "bytes" and "kilobytes" as memory + // units and units like "seconds" and "nanoseconds" as time units, + // and apply appropriate unit conversions to these. + int64 num_unit = 4; // Index into string table +} + +// Indicates the semantics of the build_id field. +enum BuildIdKind { + // Linker-generated build ID, stored in the ELF binary notes. + BUILD_ID_LINKER = 0; + // Build ID based on the content hash of the binary. Currently no particular + // hashing approach is standardized, so a given producer needs to define it + // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. + // We may choose to provide a standardized stable hash recommendation later. + BUILD_ID_BINARY_HASH = 1; +} + +// Describes the mapping of a binary in memory, including its address range, +// file offset, and metadata like build ID +message Mapping { + // Unique nonzero id for the mapping. [deprecated] + uint64 id = 1; + // Address at which the binary (or DLL) is loaded into memory. + uint64 memory_start = 2; + // The limit of the address range occupied by this mapping. + uint64 memory_limit = 3; + // Offset in the binary that corresponds to the first mapped address. + uint64 file_offset = 4; + // The object this entry is loaded from. This can be a filename on + // disk for the main binary and shared libraries, or virtual + // abstractions like "[vdso]". + int64 filename = 5; // Index into string table + // A string that uniquely identifies a particular program version + // with high probability. E.g., for binaries generated by GNU tools, + // it could be the contents of the .note.gnu.build-id field. + int64 build_id = 6; // Index into string table + // Specifies the kind of build id. See BuildIdKind enum for more details [optional] + BuildIdKind build_id_kind = 11; + // References to attributes in Profile.attribute_table. [optional] + repeated uint64 attributes = 12; + // The following fields indicate the resolution of symbolic info. + bool has_functions = 7; + bool has_filenames = 8; + bool has_line_numbers = 9; + bool has_inline_frames = 10; +} + +// Describes function and line table debug information. +message Location { + // Unique nonzero id for the location. A profile could use + // instruction addresses or any integer sequence as ids. [deprecated] + uint64 id = 1; + // The index of the corresponding profile.Mapping for this location. + // It can be unset if the mapping is unknown or not applicable for + // this profile type. + uint64 mapping_index = 2; + // The instruction address for this location, if available. It + // should be within [Mapping.memory_start...Mapping.memory_limit] + // for the corresponding mapping. A non-leaf address may be in the + // middle of a call instruction. It is up to display tools to find + // the beginning of the instruction if necessary. + uint64 address = 3; + // Multiple line indicates this location has inlined functions, + // where the last entry represents the caller into which the + // preceding entries were inlined. + // + // E.g., if memcpy() is inlined into printf: + // line[0].function_name == "memcpy" + // line[1].function_name == "printf" + repeated Line line = 4; + // Provides an indication that multiple symbols map to this location's + // address, for example due to identical code folding by the linker. In that + // case the line information above represents one of the multiple + // symbols. This field must be recomputed when the symbolization state of the + // profile changes. 
+ bool is_folded = 5; + + // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. + uint32 type_index = 6; + + // References to attributes in Profile.attribute_table. [optional] + repeated uint64 attributes = 7; +} + +// Details a specific line in a source code, linked to a function. +message Line { + // The index of the corresponding profile.Function for this line. + uint64 function_index = 1; + // Line number in source code. + int64 line = 2; + // Column number in source code. + int64 column = 3; +} + +// Describes a function, including its human-readable name, system name, +// source file, and starting line number in the source. +message Function { + // Unique nonzero id for the function. [deprecated] + uint64 id = 1; + // Name of the function, in human-readable form if available. + int64 name = 2; // Index into string table + // Name of the function, as identified by the system. + // For instance, it can be a C++ mangled name. + int64 system_name = 3; // Index into string table + // Source file containing the function. + int64 filename = 4; // Index into string table + // Line number in source file. + int64 start_line = 5; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto new file mode 100644 index 00000000000..84b0108f86a --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto @@ -0,0 +1,191 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
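For orientation before the wrapper messages that follow: the classes generated from these .proto files are consumed only by the test project, which parses the raw bytes produced by the new protobuf serializers back into strongly typed messages for assertions (see the OtlpAttributeTests and OtlpLogExporterTests changes later in this patch). A rough sketch of that round-trip pattern, modeled on the CreateResourceSpans helper added to OtlpLogExporterTests and intended as illustrative rather than the exact helper:

    using System.IO;
    using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer;
    using OpenTelemetry.Proto.Trace.V1;
    using OpenTelemetry.Resources;

    internal static class ResourceRoundTripSketch
    {
        public static ResourceSpans SerializeAndParse(Resource resource)
        {
            // Serialize the SDK resource directly into a byte buffer with the new serializer.
            byte[] buffer = new byte[1024];
            var writePosition = ProtobufOtlpResourceSerializer.WriteResource(buffer, 0, resource);

            // Parse the written bytes back with the classes generated from these .proto files.
            using var stream = new MemoryStream(buffer, 0, writePosition);
            return ResourceSpans.Parser.ParseFrom(stream);
        }
    }

Asserting against generated message types keeps the tests readable while the exporter itself now writes protobuf bytes directly.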
+ +syntax = "proto3"; + +package opentelemetry.proto.profiles.v1experimental; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; +import "opentelemetry/proto/profiles/v1experimental/pprofextended.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.profiles.v1experimental"; +option java_outer_classname = "ProfilesProto"; +option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; + +// Relationships Diagram +// +// ┌──────────────────┐ LEGEND +// │ ProfilesData │ +// └──────────────────┘ ─────▶ embedded +// │ +// │ 1-n ─────▷ referenced by index +// ▼ +// ┌──────────────────┐ +// │ ResourceProfiles │ +// └──────────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ ScopeProfiles │ +// └──────────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ ProfileContainer │ +// └──────────────────┘ +// │ +// │ 1-1 +// ▼ +// ┌──────────────────┐ +// │ Profile │ +// └──────────────────┘ +// │ n-1 +// │ 1-n ┌───────────────────────────────────────┐ +// ▼ │ ▽ +// ┌──────────────────┐ 1-n ┌──────────────┐ ┌──────────┐ +// │ Sample │ ──────▷ │ KeyValue │ │ Link │ +// └──────────────────┘ └──────────────┘ └──────────┘ +// │ 1-n △ △ +// │ 1-n ┌─────────────────┘ │ 1-n +// ▽ │ │ +// ┌──────────────────┐ n-1 ┌──────────────┐ +// │ Location │ ──────▷ │ Mapping │ +// └──────────────────┘ └──────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ Line │ +// └──────────────────┘ +// │ +// │ 1-1 +// ▽ +// ┌──────────────────┐ +// │ Function │ +// └──────────────────┘ +// + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message ProfilesData { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceProfiles resource_profiles = 1; +} + + +// A collection of ScopeProfiles from a Resource. +message ResourceProfiles { + reserved 1000; + + // The resource for the profiles in this message. + // If this field is not set then no resource info is known. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of ScopeProfiles that originate from a resource. + repeated ScopeProfiles scope_profiles = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_profiles" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of ProfileContainers produced by an InstrumentationScope. +message ScopeProfiles { + // The instrumentation scope information for the profiles in this message. 
+ // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of ProfileContainers that originate from an instrumentation scope. + repeated ProfileContainer profiles = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the metric data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all profiles in the "profiles" field. + string schema_url = 3; +} + +// A ProfileContainer represents a single profile. It wraps pprof profile with OpenTelemetry specific metadata. +message ProfileContainer { + // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with + // all zeroes is considered invalid. + // + // This field is required. + bytes profile_id = 1; + + // start_time_unix_nano is the start time of the profile. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 start_time_unix_nano = 2; + + // end_time_unix_nano is the end time of the profile. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 end_time_unix_nano = 3; + + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; + + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + uint32 dropped_attributes_count = 5; + + // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] + string original_payload_format = 6; + + // Original payload can be stored in this field. This can be useful for users who want to get the original payload. + // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. + // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. + // If the original payload is in pprof format, it SHOULD not be included in this field. + // The field is optional, however if it is present `profile` MUST be present and contain the same profiling information. + bytes original_payload = 7; + + // This is a reference to a pprof profile. Required, even when original_payload is present. 
+ opentelemetry.proto.profiles.v1experimental.Profile profile = 8; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/resource/v1/resource.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/resource/v1/resource.proto new file mode 100644 index 00000000000..6637560bc35 --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/resource/v1/resource.proto @@ -0,0 +1,37 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.resource.v1; + +import "opentelemetry/proto/common/v1/common.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Resource.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.resource.v1"; +option java_outer_classname = "ResourceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/resource/v1"; + +// Resource information. +message Resource { + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. + uint32 dropped_attributes_count = 2; +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/trace/v1/trace.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/trace/v1/trace.proto new file mode 100644 index 00000000000..5cb2f3ce1cd --- /dev/null +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/trace/v1/trace.proto @@ -0,0 +1,355 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package opentelemetry.proto.trace.v1; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Trace.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.trace.v1"; +option java_outer_classname = "TraceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/trace/v1"; + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message TracesData { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceSpans resource_spans = 1; +} + +// A collection of ScopeSpans from a Resource. +message ResourceSpans { + reserved 1000; + + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of ScopeSpans that originate from a resource. + repeated ScopeSpans scope_spans = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Spans produced by an InstrumentationScope. +message ScopeSpans { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of Spans that originate from an instrumentation scope. + repeated Span spans = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + string schema_url = 3; +} + +// A Span represents a single operation performed by a single component of the system. +// +// The next available field id is 17. +message Span { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + bytes trace_id = 1; + + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + bytes span_id = 2; + + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + string trace_state = 3; + + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + bytes parent_span_id = 4; + + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + fixed32 flags = 16; + + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + string name = 5; + + // SpanKind is the type of span. Can be used to specify additional relationships between spans + // in addition to a parent/child relationship. + enum SpanKind { + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + SPAN_KIND_UNSPECIFIED = 0; + + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SPAN_KIND_INTERNAL = 1; + + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SPAN_KIND_SERVER = 2; + + // Indicates that the span describes a request to some remote service. + SPAN_KIND_CLIENT = 3; + + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. 
+ SPAN_KIND_PRODUCER = 4; + + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SPAN_KIND_CONSUMER = 5; + } + + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + SpanKind kind = 6; + + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 start_time_unix_nano = 7; + + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 end_time_unix_nano = 8; + + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; + + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + uint32 dropped_attributes_count = 10; + + // Event is a time-stamped annotation of the span, consisting of user-supplied + // text description and key-value pairs. + message Event { + // time_unix_nano is the time the event occurred. + fixed64 time_unix_nano = 1; + + // name of the event. + // This field is semantically required to be set to non-empty string. + string name = 2; + + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 3; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + uint32 dropped_attributes_count = 4; + } + + // events is a collection of Event items. + repeated Event events = 11; + + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. 
+ uint32 dropped_events_count = 12; + + // A pointer from the current span to another span in the same trace or in a + // different trace. For example, this can be used in batching operations, + // where a single batch handler processes multiple requests from different + // traces or when the handler receives a request from a different project. + message Link { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + bytes trace_id = 1; + + // A unique identifier for the linked span. The ID is an 8-byte array. + bytes span_id = 2; + + // The trace_state associated with the link. + string trace_state = 3; + + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + uint32 dropped_attributes_count = 5; + + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + fixed32 flags = 6; + } + + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + repeated Link links = 13; + + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + uint32 dropped_links_count = 14; + + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status status = 15; +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +message Status { + reserved 1; + + // A developer-facing human readable error message. + string message = 2; + + // For the semantics of status codes see + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status + enum StatusCode { + // The default status. + STATUS_CODE_UNSET = 0; + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + STATUS_CODE_OK = 1; + // The Span contains an error. + STATUS_CODE_ERROR = 2; + }; + + // The status code. + StatusCode code = 3; +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. 
To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +enum SpanFlags { + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + SPAN_FLAGS_DO_NOT_USE = 0; + + // Bits 0-7 are used for trace flags. + SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; + + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100; + SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200; + + // Bits 10-31 are reserved for future use. +} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/MockCollectorIntegrationTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/MockCollectorIntegrationTests.cs index e0fa32e5970..ab382fe728a 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/MockCollectorIntegrationTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/MockCollectorIntegrationTests.cs @@ -354,22 +354,22 @@ public async Task HttpPersistentStorageRetryTests(bool usePersistentStorageTrans var exporterOptions = new OtlpExporterOptions() { Endpoint = endpoint, TimeoutMilliseconds = 20000 }; - var exportClient = new ProtobufOtlpHttpExportClient(exporterOptions, new HttpClient(), "/v1/traces"); + var exportClient = new OtlpHttpExportClient(exporterOptions, new HttpClient(), "/v1/traces"); // TODO: update this to configure via experimental environment variable. - ProtobufOtlpExporterTransmissionHandler transmissionHandler; + OtlpExporterTransmissionHandler transmissionHandler; MockFileProvider? 
mockProvider = null; if (usePersistentStorageTransmissionHandler) { mockProvider = new MockFileProvider(); - transmissionHandler = new ProtobufOtlpExporterPersistentStorageTransmissionHandler( + transmissionHandler = new OtlpExporterPersistentStorageTransmissionHandler( mockProvider, exportClient, exporterOptions.TimeoutMilliseconds); } else { - transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(exportClient, exporterOptions.TimeoutMilliseconds); + transmissionHandler = new OtlpExporterTransmissionHandler(exportClient, exporterOptions.TimeoutMilliseconds); } var configuration = new ConfigurationBuilder() @@ -405,7 +405,7 @@ public async Task HttpPersistentStorageRetryTests(bool usePersistentStorageTrans Assert.Single(mockProvider!.TryGetBlobs()); // Force Retry - Assert.True((transmissionHandler as ProtobufOtlpExporterPersistentStorageTransmissionHandler)?.InitiateAndWaitForRetryProcess(-1)); + Assert.True((transmissionHandler as OtlpExporterPersistentStorageTransmissionHandler)?.InitiateAndWaitForRetryProcess(-1)); Assert.False(mockProvider.TryGetBlob(out _)); } @@ -494,22 +494,22 @@ public async Task GrpcPersistentStorageRetryTests(bool usePersistentStorageTrans var exporterOptions = new OtlpExporterOptions() { Endpoint = endpoint, TimeoutMilliseconds = 20000 }; - var exportClient = new ProtobufOtlpGrpcExportClient(exporterOptions, new HttpClient(), "opentelemetry.proto.collector.trace.v1.TraceService/Export"); + var exportClient = new OtlpGrpcExportClient(exporterOptions, new HttpClient(), "opentelemetry.proto.collector.trace.v1.TraceService/Export"); // TODO: update this to configure via experimental environment variable. - ProtobufOtlpExporterTransmissionHandler transmissionHandler; + OtlpExporterTransmissionHandler transmissionHandler; MockFileProvider? 
mockProvider = null; if (usePersistentStorageTransmissionHandler) { mockProvider = new MockFileProvider(); - transmissionHandler = new ProtobufOtlpExporterPersistentStorageTransmissionHandler( + transmissionHandler = new OtlpExporterPersistentStorageTransmissionHandler( mockProvider, exportClient, exporterOptions.TimeoutMilliseconds); } else { - transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(exportClient, exporterOptions.TimeoutMilliseconds); + transmissionHandler = new OtlpExporterTransmissionHandler(exportClient, exporterOptions.TimeoutMilliseconds); } var configuration = new ConfigurationBuilder() @@ -545,7 +545,7 @@ public async Task GrpcPersistentStorageRetryTests(bool usePersistentStorageTrans Assert.Single(mockProvider.TryGetBlobs()); // Force Retry - Assert.True((transmissionHandler as ProtobufOtlpExporterPersistentStorageTransmissionHandler)?.InitiateAndWaitForRetryProcess(-1)); + Assert.True((transmissionHandler as OtlpExporterPersistentStorageTransmissionHandler)?.InitiateAndWaitForRetryProcess(-1)); Assert.False(mockProvider.TryGetBlob(out _)); } diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.csproj b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.csproj index 1b9526a3ea7..6f1833c910b 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.csproj +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.csproj @@ -2,6 +2,7 @@ $(TargetFrameworksForTests) + $(NoWarn);SA1518;SA1210 @@ -31,6 +32,10 @@ + + + + @@ -49,4 +54,10 @@ + + + Implementation + + + diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpAttributeTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpAttributeTests.cs index a325874fb30..05ffe584eb0 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpAttributeTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpAttributeTests.cs @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 using System.Diagnostics.CodeAnalysis; -using Google.Protobuf.Collections; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; +using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer; using Xunit; using OtlpCommon = OpenTelemetry.Proto.Common.V1; @@ -269,12 +268,22 @@ public void ExceptionInToStringIsCaught() private static bool TryTransformTag(KeyValuePair tag, [NotNullWhen(true)] out OtlpCommon.KeyValue? attribute) { - var destination = new RepeatedField(); + ProtobufOtlpTagWriter.OtlpTagWriterState otlpTagWriterState = new ProtobufOtlpTagWriter.OtlpTagWriterState + { + Buffer = new byte[1024], + WritePosition = 0, + }; - if (OtlpTagWriter.Instance.TryWriteTag(ref destination, tag)) + if (ProtobufOtlpTagWriter.Instance.TryWriteTag(ref otlpTagWriterState, tag)) { - Assert.NotEmpty(destination); - attribute = destination[0]; + // Deserialize the ResourceSpans and validate the attributes. 
+ using (var stream = new MemoryStream(otlpTagWriterState.Buffer, 0, otlpTagWriterState.WritePosition)) + { + var keyValue = OtlpCommon.KeyValue.Parser.ParseFrom(stream); + Assert.NotNull(keyValue); + attribute = keyValue; + } + return true; } diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpExporterOptionsExtensionsTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpExporterOptionsExtensionsTests.cs index d7c66ab8054..207a20483a4 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpExporterOptionsExtensionsTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpExporterOptionsExtensionsTests.cs @@ -9,67 +9,11 @@ using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; using Xunit; -using Xunit.Sdk; namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests; public class OtlpExporterOptionsExtensionsTests { - [Theory] - [InlineData("key=value", new string[] { "key" }, new string[] { "value" })] - [InlineData("key1=value1,key2=value2", new string[] { "key1", "key2" }, new string[] { "value1", "value2" })] - [InlineData("key1 = value1, key2=value2 ", new string[] { "key1", "key2" }, new string[] { "value1", "value2" })] - [InlineData("key==value", new string[] { "key" }, new string[] { "=value" })] - [InlineData("access-token=abc=/123,timeout=1234", new string[] { "access-token", "timeout" }, new string[] { "abc=/123", "1234" })] - [InlineData("key1=value1;key2=value2", new string[] { "key1" }, new string[] { "value1;key2=value2" })] // semicolon is not treated as a delimiter (https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#specifying-headers-via-environment-variables) - [InlineData("Authorization=Basic%20AAA", new string[] { "authorization" }, new string[] { "Basic AAA" })] - [InlineData("Authorization=Basic AAA", new string[] { "authorization" }, new string[] { "Basic AAA" })] - public void GetMetadataFromHeadersWorksCorrectFormat(string headers, string[] keys, string[] values) - { - var options = new OtlpExporterOptions - { - Headers = headers, - }; - var metadata = options.GetMetadataFromHeaders(); - - Assert.Equal(OtlpExporterOptions.StandardHeaders.Length + keys.Length, metadata.Count); - - for (int i = 0; i < keys.Length; i++) - { - Assert.Contains(metadata, entry => entry.Key == keys[i] && entry.Value == values[i]); - } - - for (int i = 0; i < OtlpExporterOptions.StandardHeaders.Length; i++) - { - // Metadata key is always converted to lowercase. 
- // See: https://cloud.google.com/dotnet/docs/reference/Grpc.Core/latest/Grpc.Core.Metadata.Entry#Grpc_Core_Metadata_Entry__ctor_System_String_System_String_ - Assert.Contains(metadata, entry => entry.Key == OtlpExporterOptions.StandardHeaders[i].Key.ToLower() && entry.Value == OtlpExporterOptions.StandardHeaders[i].Value); - } - } - - [Theory] - [InlineData("headers")] - [InlineData("key,value")] - public void GetMetadataFromHeadersThrowsExceptionOnInvalidFormat(string headers) - { - try - { - var options = new OtlpExporterOptions - { - Headers = headers, - }; - var metadata = options.GetMetadataFromHeaders(); - } - catch (Exception ex) - { - Assert.IsType(ex); - Assert.Equal("Headers provided in an invalid format.", ex.Message); - return; - } - - throw new XunitException("GetMetadataFromHeaders did not throw an exception for invalid input headers"); - } - [Theory] [InlineData("")] [InlineData(null)] @@ -91,8 +35,8 @@ public void GetHeaders_NoOptionHeaders_ReturnsStandardHeaders(string? optionHead } [Theory] - [InlineData(OtlpExportProtocol.Grpc, typeof(ProtobufOtlpGrpcExportClient))] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient))] + [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcExportClient))] + [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpExportClient))] public void GetTraceExportClient_SupportedProtocol_ReturnsCorrectExportClient(OtlpExportProtocol protocol, Type expectedExportClientType) { var options = new OtlpExporterOptions @@ -131,24 +75,15 @@ public void AppendPathIfNotPresent_TracesPath_AppendsCorrectly(string inputUri, } [Theory] - [InlineData(OtlpExportProtocol.Grpc, typeof(ProtobufOtlpGrpcExportClient), false, 10000, null)] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), false, 10000, null)] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), true, 8000, null)] - [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcMetricsExportClient), false, 10000, null)] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), false, 10000, null)] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), true, 8000, null)] - [InlineData(OtlpExportProtocol.Grpc, typeof(ProtobufOtlpGrpcExportClient), false, 10000, "in_memory")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), false, 10000, "in_memory")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), true, 8000, "in_memory")] - [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcMetricsExportClient), false, 10000, "in_memory")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), false, 10000, "in_memory")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), true, 8000, "in_memory")] - [InlineData(OtlpExportProtocol.Grpc, typeof(ProtobufOtlpGrpcExportClient), false, 10000, "disk")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), false, 10000, "disk")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(ProtobufOtlpHttpExportClient), true, 8000, "disk")] - [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcMetricsExportClient), false, 10000, "disk")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), false, 10000, "disk")] - [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpMetricsExportClient), true, 8000, "disk")] + 
[InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcExportClient), false, 10000, null)] + [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpExportClient), false, 10000, null)] + [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpExportClient), true, 8000, null)] + [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcExportClient), false, 10000, "in_memory")] + [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpExportClient), false, 10000, "in_memory")] + [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpExportClient), true, 8000, "in_memory")] + [InlineData(OtlpExportProtocol.Grpc, typeof(OtlpGrpcExportClient), false, 10000, "disk")] + [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpExportClient), false, 10000, "disk")] + [InlineData(OtlpExportProtocol.HttpProtobuf, typeof(OtlpHttpExportClient), true, 8000, "disk")] public void GetTransmissionHandler_InitializesCorrectHandlerExportClientAndTimeoutValue(OtlpExportProtocol protocol, Type exportClientType, bool customHttpClient, int expectedTimeoutMilliseconds, string? retryStrategy) { var exporterOptions = new OtlpExporterOptions() { Protocol = protocol }; @@ -164,59 +99,23 @@ public void GetTransmissionHandler_InitializesCorrectHandlerExportClientAndTimeo .AddInMemoryCollection(new Dictionary { [ExperimentalOptions.OtlpRetryEnvVar] = retryStrategy }) .Build(); - if (exportClientType == typeof(ProtobufOtlpGrpcExportClient) || exportClientType == typeof(ProtobufOtlpHttpExportClient)) - { - var transmissionHandler = exporterOptions.GetProtobufExportTransmissionHandler(new ExperimentalOptions(configuration), OtlpSignalType.Traces); - - AssertTransmissionHandler(transmissionHandler, exportClientType, expectedTimeoutMilliseconds, retryStrategy); - } - else if (exportClientType == typeof(OtlpGrpcMetricsExportClient) || exportClientType == typeof(OtlpHttpMetricsExportClient)) - { - var transmissionHandler = exporterOptions.GetMetricsExportTransmissionHandler(new ExperimentalOptions(configuration)); - - AssertTransmissionHandler(transmissionHandler, exportClientType, expectedTimeoutMilliseconds, retryStrategy); - } - else - { - var transmissionHandler = exporterOptions.GetProtobufExportTransmissionHandler(new ExperimentalOptions(configuration), OtlpSignalType.Logs); - - AssertTransmissionHandler(transmissionHandler, exportClientType, expectedTimeoutMilliseconds, retryStrategy); - } - } - - private static void AssertTransmissionHandler(OtlpExporterTransmissionHandler transmissionHandler, Type exportClientType, int expectedTimeoutMilliseconds, string? retryStrategy) - { - if (retryStrategy == "in_memory") - { - Assert.True(transmissionHandler is OtlpExporterRetryTransmissionHandler); - } - else if (retryStrategy == "disk") - { - Assert.True(transmissionHandler is OtlpExporterPersistentStorageTransmissionHandler); - } - else - { - Assert.True(transmissionHandler is OtlpExporterTransmissionHandler); - } - - Assert.Equal(exportClientType, transmissionHandler.ExportClient.GetType()); - - Assert.Equal(expectedTimeoutMilliseconds, transmissionHandler.TimeoutMilliseconds); + var transmissionHandler = exporterOptions.GetProtobufExportTransmissionHandler(new ExperimentalOptions(configuration), OtlpSignalType.Traces); + AssertTransmissionHandler(transmissionHandler, exportClientType, expectedTimeoutMilliseconds, retryStrategy); } - private static void AssertTransmissionHandler(ProtobufOtlpExporterTransmissionHandler transmissionHandler, Type exportClientType, int expectedTimeoutMilliseconds, string? 
retryStrategy) + private static void AssertTransmissionHandler(OtlpExporterTransmissionHandler transmissionHandler, Type exportClientType, int expectedTimeoutMilliseconds, string? retryStrategy) { if (retryStrategy == "in_memory") { - Assert.True(transmissionHandler is ProtobufOtlpExporterRetryTransmissionHandler); + Assert.True(transmissionHandler is OtlpExporterRetryTransmissionHandler); } else if (retryStrategy == "disk") { - Assert.True(transmissionHandler is ProtobufOtlpExporterPersistentStorageTransmissionHandler); + Assert.True(transmissionHandler is OtlpExporterPersistentStorageTransmissionHandler); } else { - Assert.True(transmissionHandler is ProtobufOtlpExporterTransmissionHandler); + Assert.True(transmissionHandler is OtlpExporterTransmissionHandler); } Assert.Equal(exportClientType, transmissionHandler.ExportClient.GetType()); diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/BaseOtlpHttpExportClientTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpHttpExportClientTests.cs similarity index 73% rename from test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/BaseOtlpHttpExportClientTests.cs rename to test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpHttpExportClientTests.cs index 36d534df9a1..c21dbd8c8a0 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/BaseOtlpHttpExportClientTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpHttpExportClientTests.cs @@ -9,7 +9,7 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests; -public class BaseOtlpHttpExportClientTests +public class OtlpHttpExportClientTests { [Theory] [InlineData(null, null, "http://localhost:4318/signal/path")] @@ -30,7 +30,7 @@ public void ValidateOtlpHttpExportClientEndpoint(string? optionEndpoint, string? options.Endpoint = new Uri(optionEndpoint); } - var exporterClient = new TestOtlpHttpExportClient(options, new HttpClient()); + var exporterClient = new OtlpHttpExportClient(options, new HttpClient(), "signal/path"); Assert.Equal(new Uri(expectedExporterEndpoint), exporterClient.Endpoint); } finally @@ -38,17 +38,4 @@ public void ValidateOtlpHttpExportClientEndpoint(string? optionEndpoint, string? Environment.SetEnvironmentVariable(OtlpSpecConfigDefinitions.DefaultEndpointEnvVarName, null); } } - - internal class TestOtlpHttpExportClient : BaseOtlpHttpExportClient - { - public TestOtlpHttpExportClient(OtlpExporterOptions options, HttpClient httpClient) - : base(options, httpClient, "signal/path") - { - } - - protected override HttpContent CreateHttpContent(string exportRequest) - { - throw new NotImplementedException(); - } - } } diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpLogExporterTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpLogExporterTests.cs index 626cf68a30c..95acb36da9b 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpLogExporterTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpLogExporterTests.cs @@ -12,6 +12,7 @@ using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Transmission; using OpenTelemetry.Internal; using OpenTelemetry.Logs; +using OpenTelemetry.Proto.Trace.V1; using OpenTelemetry.Resources; using OpenTelemetry.Tests; using OpenTelemetry.Trace; @@ -685,9 +686,9 @@ public void CheckToOtlpLogRecordRespectsAttributeLimits() public void Export_WhenExportClientIsProvidedInCtor_UsesProvidedExportClient() { // Arrange. 
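// The lines below swap the old TestProtobufExportClient / ProtobufOtlpExporterTransmissionHandler pair
// for TestExportClient and OtlpExporterTransmissionHandler, the byte-buffer based types this patch
// consolidates on; TestExportClient (updated later in this patch) exposes a SendExportRequestCalled
// flag the test can assert on.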
- var testExportClient = new TestProtobufExportClient(); + var testExportClient = new TestExportClient(); var exporterOptions = new OtlpExporterOptions(); - var transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); + var transmissionHandler = new OtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); var emptyLogRecords = Array.Empty(); var emptyBatch = new Batch(emptyLogRecords, emptyLogRecords.Length); var sut = new OtlpLogExporter( @@ -707,9 +708,9 @@ public void Export_WhenExportClientIsProvidedInCtor_UsesProvidedExportClient() public void Export_WhenExportClientThrowsException_ReturnsExportResultFailure() { // Arrange. - var testExportClient = new TestProtobufExportClient(throwException: true); + var testExportClient = new TestExportClient(throwException: true); var exporterOptions = new OtlpExporterOptions(); - var transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); + var transmissionHandler = new OtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); var emptyLogRecords = Array.Empty(); var emptyBatch = new Batch(emptyLogRecords, emptyLogRecords.Length); var sut = new OtlpLogExporter( @@ -729,9 +730,9 @@ public void Export_WhenExportClientThrowsException_ReturnsExportResultFailure() public void Export_WhenExportIsSuccessful_ReturnsExportResultSuccess() { // Arrange. - var testExportClient = new TestProtobufExportClient(); + var testExportClient = new TestExportClient(); var exporterOptions = new OtlpExporterOptions(); - var transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); + var transmissionHandler = new OtlpExporterTransmissionHandler(testExportClient, exporterOptions.TimeoutMilliseconds); var emptyLogRecords = Array.Empty(); var emptyBatch = new Batch(emptyLogRecords, emptyLogRecords.Length); var sut = new OtlpLogExporter( @@ -1344,7 +1345,7 @@ public void ValidateInstrumentationScope() var batch = new Batch(logRecords.ToArray(), logRecords.Count); var resourceBuilder = ResourceBuilder.CreateEmpty(); - var processResource = resourceBuilder.Build().ToOtlpResource(); + var processResource = CreateResourceSpans(resourceBuilder.Build()); OtlpCollector.ExportLogsServiceRequest request = CreateLogsExportRequest(DefaultSdkLimitOptions, new ExperimentalOptions(), batch, resourceBuilder.Build()); @@ -1614,4 +1615,18 @@ private static OtlpCollector.ExportLogsServiceRequest CreateLogsExportRequest(Sd var scopeLogs = OtlpLogs.ScopeLogs.Parser.ParseFrom(stream); return scopeLogs.LogRecords.FirstOrDefault(); } + + private static ResourceSpans CreateResourceSpans(Resource resource) + { + byte[] buffer = new byte[1024]; + var writePosition = ProtobufOtlpResourceSerializer.WriteResource(buffer, 0, resource); + + ResourceSpans? 
resourceSpans; + using (var stream = new MemoryStream(buffer, 0, writePosition)) + { + resourceSpans = ResourceSpans.Parser.ParseFrom(stream); + } + + return resourceSpans; + } } diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpMetricsExporterTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpMetricsExporterTests.cs index f34bc572126..d90d3934ad7 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpMetricsExporterTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpMetricsExporterTests.cs @@ -7,7 +7,6 @@ using Google.Protobuf; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer; using OpenTelemetry.Metrics; using OpenTelemetry.Resources; @@ -159,12 +158,9 @@ public void ServiceProviderHttpClientFactoryInvoked() } [Theory] - [InlineData(true, true)] - [InlineData(true, false)] - - [InlineData(false, true)] - [InlineData(false, false)] - public void ToOtlpResourceMetricsTest(bool useCustomSerializer, bool includeServiceNameInResource) + [InlineData(true)] + [InlineData(false)] + public void ToOtlpResourceMetricsTest(bool includeServiceNameInResource) { var resourceBuilder = ResourceBuilder.CreateEmpty(); if (includeServiceNameInResource) @@ -198,17 +194,7 @@ public void ToOtlpResourceMetricsTest(bool useCustomSerializer, bool includeServ provider.ForceFlush(); var batch = new Batch(metrics.ToArray(), metrics.Count); - - var request = new OtlpCollector.ExportMetricsServiceRequest(); - - if (useCustomSerializer) - { - request = CreateMetricExportRequest(batch, resourceBuilder.Build()); - } - else - { - request.AddMetrics(resourceBuilder.Build().ToOtlpResource(), batch); - } + var request = CreateMetricExportRequest(batch, resourceBuilder.Build()); Assert.Single(request.ResourceMetrics); var resourceMetric = request.ResourceMetrics.First(); @@ -236,18 +222,12 @@ public void ToOtlpResourceMetricsTest(bool useCustomSerializer, bool includeServ } [Theory] - [InlineData(true, "test_gauge", null, null, 123L, null)] - [InlineData(true, "test_gauge", null, null, null, 123.45)] - [InlineData(true, "test_gauge", null, null, 123L, null, true)] - [InlineData(true, "test_gauge", null, null, null, 123.45, true)] - [InlineData(true, "test_gauge", "description", "unit", 123L, null)] - - [InlineData(false, "test_gauge", null, null, 123L, null)] - [InlineData(false, "test_gauge", null, null, null, 123.45)] - [InlineData(false, "test_gauge", null, null, 123L, null, true)] - [InlineData(false, "test_gauge", null, null, null, 123.45, true)] - [InlineData(false, "test_gauge", "description", "unit", 123L, null)] - public void TestGaugeToOtlpMetric(bool useCustomSerializer, string name, string? description, string? unit, long? longValue, double? doubleValue, bool enableExemplars = false) + [InlineData("test_gauge", null, null, 123L, null)] + [InlineData("test_gauge", null, null, null, 123.45)] + [InlineData("test_gauge", null, null, 123L, null, true)] + [InlineData("test_gauge", null, null, null, 123.45, true)] + [InlineData("test_gauge", "description", "unit", 123L, null)] + public void TestGaugeToOtlpMetric(string name, string? description, string? unit, long? longValue, double? doubleValue, bool enableExemplars = false) { var metrics = new List(); @@ -271,16 +251,7 @@ public void TestGaugeToOtlpMetric(bool useCustomSerializer, string name, string? 
var batch = new Batch(metrics.ToArray(), metrics.Count); - var request = new OtlpCollector.ExportMetricsServiceRequest(); - - if (useCustomSerializer) - { - request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); - } - else - { - request.AddMetrics(ResourceBuilder.CreateEmpty().Build().ToOtlpResource(), batch); - } + var request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); var resourceMetric = request.ResourceMetrics.Single(); var scopeMetrics = resourceMetric.ScopeMetrics.Single(); @@ -320,28 +291,17 @@ public void TestGaugeToOtlpMetric(bool useCustomSerializer, string name, string? } [Theory] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(true, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] - [InlineData(true, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(true, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(true, "test_counter", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] - - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(false, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] - [InlineData(false, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(false, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(false, "test_counter", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] - public void TestCounterToOtlpMetric(bool useCustomSerializer, string name, string? description, string? unit, long? longValue, double? 
doubleValue, MetricReaderTemporalityPreference aggregationTemporality, bool enableKeyValues = false, bool enableExemplars = false) + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] + [InlineData("test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] + [InlineData("test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] + [InlineData("test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] + [InlineData("test_counter", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] + public void TestCounterToOtlpMetric(string name, string? description, string? unit, long? longValue, double? doubleValue, MetricReaderTemporalityPreference aggregationTemporality, bool enableKeyValues = false, bool enableExemplars = false) { var metrics = new List(); @@ -370,17 +330,7 @@ public void TestCounterToOtlpMetric(bool useCustomSerializer, string name, strin provider.ForceFlush(); var batch = new Batch(metrics.ToArray(), metrics.Count); - - var request = new OtlpCollector.ExportMetricsServiceRequest(); - - if (useCustomSerializer) - { - request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); - } - else - { - request.AddMetrics(ResourceBuilder.CreateEmpty().Build().ToOtlpResource(), batch); - } + var request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); var resourceMetric = request.ResourceMetrics.Single(); var scopeMetrics = resourceMetric.ScopeMetrics.Single(); @@ -434,32 +384,19 @@ public void TestCounterToOtlpMetric(bool useCustomSerializer, string name, strin } [Theory] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(true, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(true, "test_counter", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] - [InlineData(true, "test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Delta)] - [InlineData(true, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(true, "test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(true, "test_counter", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_counter", null, null, 123L, null, 
MetricReaderTemporalityPreference.Delta, true)] - - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(false, "test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(false, "test_counter", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] - [InlineData(false, "test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Delta)] - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(false, "test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(false, "test_counter", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] - public void TestUpDownCounterToOtlpMetric(bool useCustomSerializer, string name, string? description, string? unit, long? longValue, double? doubleValue, MetricReaderTemporalityPreference aggregationTemporality, bool enableKeyValues = false, bool enableExemplars = false) + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] + [InlineData("test_counter", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] + [InlineData("test_counter", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] + [InlineData("test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Delta)] + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] + [InlineData("test_counter", null, null, null, -123.45, MetricReaderTemporalityPreference.Delta, false, true)] + [InlineData("test_counter", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_counter", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] + public void TestUpDownCounterToOtlpMetric(string name, string? description, string? unit, long? longValue, double? 
doubleValue, MetricReaderTemporalityPreference aggregationTemporality, bool enableKeyValues = false, bool enableExemplars = false) { var metrics = new List(); @@ -489,15 +426,7 @@ public void TestUpDownCounterToOtlpMetric(bool useCustomSerializer, string name, var batch = new Batch(metrics.ToArray(), metrics.Count); - var request = new OtlpCollector.ExportMetricsServiceRequest(); - if (useCustomSerializer) - { - request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); - } - else - { - request.AddMetrics(ResourceBuilder.CreateEmpty().Build().ToOtlpResource(), batch); - } + var request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); var resourceMetric = request.ResourceMetrics.Single(); var scopeMetrics = resourceMetric.ScopeMetrics.Single(); @@ -551,32 +480,19 @@ public void TestUpDownCounterToOtlpMetric(bool useCustomSerializer, string name, } [Theory] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(true, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(true, "test_histogram", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] - [InlineData(true, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(true, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(true, "test_histogram", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] - - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(false, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(false, "test_histogram", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] - [InlineData(false, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(false, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(false, "test_histogram", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] - 
[InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] - public void TestExponentialHistogramToOtlpMetric(bool useCustomSerializer, string name, string? description, string? unit, long? longValue, double? doubleValue, MetricReaderTemporalityPreference aggregationTemporality, bool enableKeyValues = false, bool enableExemplars = false) + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] + [InlineData("test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] + [InlineData("test_histogram", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] + [InlineData("test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] + [InlineData("test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] + [InlineData("test_histogram", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] + public void TestExponentialHistogramToOtlpMetric(string name, string? description, string? unit, long? longValue, double? 
doubleValue, MetricReaderTemporalityPreference aggregationTemporality, bool enableKeyValues = false, bool enableExemplars = false) { var metrics = new List(); @@ -611,16 +527,7 @@ public void TestExponentialHistogramToOtlpMetric(bool useCustomSerializer, strin provider.ForceFlush(); var batch = new Batch(metrics.ToArray(), metrics.Count); - - var request = new OtlpCollector.ExportMetricsServiceRequest(); - if (useCustomSerializer) - { - request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); - } - else - { - request.AddMetrics(ResourceBuilder.CreateEmpty().Build().ToOtlpResource(), batch); - } + var request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); var resourceMetric = request.ResourceMetrics.Single(); var scopeMetrics = resourceMetric.ScopeMetrics.Single(); @@ -711,32 +618,19 @@ public void TestExponentialHistogramToOtlpMetric(bool useCustomSerializer, strin } [Theory] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(true, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(true, "test_histogram", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] - [InlineData(true, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(true, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(true, "test_histogram", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(true, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] - - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(false, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] - [InlineData(false, "test_histogram", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] - [InlineData(false, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(false, "test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] - [InlineData(false, "test_histogram", "description", "unit", 123L, null, 
MetricReaderTemporalityPreference.Cumulative)] - [InlineData(false, "test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] - public void TestHistogramToOtlpMetric(bool useCustomSerializer, string name, string? description, string? unit, long? longValue, double? doubleValue, MetricReaderTemporalityPreference aggregationTemporality, bool enableKeyValues = false, bool enableExemplars = false) + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Cumulative, false, true)] + [InlineData("test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Cumulative, false, true)] + [InlineData("test_histogram", null, null, -123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, null, -123.45, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta)] + [InlineData("test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta)] + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, false, true)] + [InlineData("test_histogram", null, null, null, 123.45, MetricReaderTemporalityPreference.Delta, false, true)] + [InlineData("test_histogram", "description", "unit", 123L, null, MetricReaderTemporalityPreference.Cumulative)] + [InlineData("test_histogram", null, null, 123L, null, MetricReaderTemporalityPreference.Delta, true)] + public void TestHistogramToOtlpMetric(string name, string? description, string? unit, long? longValue, double? doubleValue, MetricReaderTemporalityPreference aggregationTemporality, bool enableKeyValues = false, bool enableExemplars = false) { var metrics = new List(); @@ -765,16 +659,7 @@ public void TestHistogramToOtlpMetric(bool useCustomSerializer, string name, str provider.ForceFlush(); var batch = new Batch(metrics.ToArray(), metrics.Count); - - var request = new OtlpCollector.ExportMetricsServiceRequest(); - if (useCustomSerializer) - { - request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); - } - else - { - request.AddMetrics(ResourceBuilder.CreateEmpty().Build().ToOtlpResource(), batch); - } + var request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); var resourceMetric = request.ResourceMetrics.Single(); var scopeMetrics = resourceMetric.ScopeMetrics.Single(); @@ -895,16 +780,11 @@ public void TestTemporalityPreferenceUsingEnvVar(string configValue, MetricReade } [Theory] - [InlineData(true, false, false)] - [InlineData(true, true, false)] - [InlineData(true, false, true)] - [InlineData(true, true, true)] - - [InlineData(false, false, false)] - [InlineData(false, true, false)] - [InlineData(false, false, true)] - [InlineData(false, true, true)] - public void ToOtlpExemplarTests(bool useCustomSerializer, bool enableTagFiltering, bool enableTracing) + [InlineData(false, false)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(true, true)] + public void ToOtlpExemplarTests(bool enableTagFiltering, bool enableTracing) { ActivitySource? activitySource = null; Activity? 
activity = null; @@ -949,15 +829,7 @@ public void ToOtlpExemplarTests(bool useCustomSerializer, bool enableTagFilterin meterProvider.ForceFlush(); var batch = new Batch(exportedItems.ToArray(), exportedItems.Count); - var request = new OtlpCollector.ExportMetricsServiceRequest(); - if (useCustomSerializer) - { - request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); - } - else - { - request.AddMetrics(ResourceBuilder.CreateEmpty().Build().ToOtlpResource(), batch); - } + var request = CreateMetricExportRequest(batch, ResourceBuilder.CreateEmpty().Build()); Assert.Single(request.ResourceMetrics); var resourceMetric = request.ResourceMetrics.First(); diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpResourceTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpResourceTests.cs index 57b1c58fdf2..4d309326a78 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpResourceTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpResourceTests.cs @@ -1,7 +1,6 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation; using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.Serializer; using OpenTelemetry.Proto.Trace.V1; using OpenTelemetry.Resources; @@ -12,11 +11,9 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests; public class OtlpResourceTests { [Theory] - [InlineData(true, false)] - [InlineData(false, false)] - [InlineData(true, true)] - [InlineData(false, true)] - public void ToOtlpResourceTest(bool includeServiceNameInResource, bool useCustomSerializer) + [InlineData(true)] + [InlineData(false)] + public void ToOtlpResourceTest(bool includeServiceNameInResource) { // Targeted test to cover OTel Resource to OTLP Resource // conversion, independent of signals. @@ -29,21 +26,14 @@ public void ToOtlpResourceTest(bool includeServiceNameInResource, bool useCustom var resource = resourceBuilder.Build(); Proto.Resource.V1.Resource otlpResource; - if (useCustomSerializer) - { - byte[] buffer = new byte[1024]; - var writePosition = ProtobufOtlpResourceSerializer.WriteResource(buffer, 0, resource); + byte[] buffer = new byte[1024]; + var writePosition = ProtobufOtlpResourceSerializer.WriteResource(buffer, 0, resource); - // Deserialize the ResourceSpans and validate the attributes. - using (var stream = new MemoryStream(buffer, 0, writePosition)) - { - var resourceSpans = ResourceSpans.Parser.ParseFrom(stream); - otlpResource = resourceSpans.Resource; - } - } - else + // Deserialize the ResourceSpans and validate the attributes. 
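// ProtobufOtlpResourceSerializer.WriteResource appears to emit the Resource wrapped in a
// ResourceSpans message, which is why the buffer is parsed back with ResourceSpans.Parser
// below and the attributes are then read from resourceSpans.Resource; the fixed 1024-byte
// buffer is assumed to be large enough for the attributes these tests set.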
+ using (var stream = new MemoryStream(buffer, 0, writePosition)) { - otlpResource = resource.ToOtlpResource(); + var resourceSpans = ResourceSpans.Parser.ParseFrom(stream); + otlpResource = resourceSpans.Resource; } if (includeServiceNameInResource) diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpRetryTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpRetryTests.cs index e72faee429c..3ba577c7301 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpRetryTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpRetryTests.cs @@ -8,7 +8,7 @@ #endif using Google.Protobuf; using Google.Protobuf.WellKnownTypes; -using Grpc.Core; +using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient.Grpc; using Xunit; namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient.Tests; @@ -29,11 +29,10 @@ public void TryGetGrpcRetryResultTest(GrpcRetryTestCase testCase) foreach (var retryAttempt in testCase.RetryAttempts) { ++attempts; - var rpcException = retryAttempt.Response.Exception as RpcException; - Assert.NotNull(rpcException); - var statusCode = rpcException.StatusCode; + Assert.NotNull(retryAttempt.Response.Status); + var statusCode = retryAttempt.Response.Status.Value.StatusCode; var deadline = retryAttempt.Response.DeadlineUtc; - var trailers = rpcException.Trailers; + var trailers = retryAttempt.Response.GrpcStatusDetailsHeader; var success = OtlpRetry.TryGetGrpcRetryResult(retryAttempt.Response, nextRetryDelayMilliseconds, out var retryResult); Assert.Equal(retryAttempt.ExpectedSuccess, success); @@ -46,7 +45,7 @@ public void TryGetGrpcRetryResultTest(GrpcRetryTestCase testCase) if (retryResult.Throttled) { - Assert.Equal(retryAttempt.ThrottleDelay, retryResult.RetryDelay); + Assert.Equal(GrpcStatusDeserializer.TryGetGrpcRetryDelay(retryAttempt.ThrottleDelay), retryResult.RetryDelay); } else { @@ -119,23 +118,23 @@ private GrpcRetryTestCase(string testRunnerName, GrpcRetryAttempt[] retryAttempt public static IEnumerable GetGrpcTestCases() { - yield return new[] { new GrpcRetryTestCase("Cancelled", new GrpcRetryAttempt[] { new(StatusCode.Cancelled) }) }; - yield return new[] { new GrpcRetryTestCase("DeadlineExceeded", new GrpcRetryAttempt[] { new(StatusCode.DeadlineExceeded) }) }; - yield return new[] { new GrpcRetryTestCase("Aborted", new GrpcRetryAttempt[] { new(StatusCode.Aborted) }) }; - yield return new[] { new GrpcRetryTestCase("OutOfRange", new GrpcRetryAttempt[] { new(StatusCode.OutOfRange) }) }; - yield return new[] { new GrpcRetryTestCase("DataLoss", new GrpcRetryAttempt[] { new(StatusCode.DataLoss) }) }; - yield return new[] { new GrpcRetryTestCase("Unavailable", new GrpcRetryAttempt[] { new(StatusCode.Unavailable) }) }; + yield return new[] { new GrpcRetryTestCase("Cancelled", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Cancelled) }) }; + yield return new[] { new GrpcRetryTestCase("DeadlineExceeded", new GrpcRetryAttempt[] { new(Grpc.StatusCode.DeadlineExceeded) }) }; + yield return new[] { new GrpcRetryTestCase("Aborted", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Aborted) }) }; + yield return new[] { new GrpcRetryTestCase("OutOfRange", new GrpcRetryAttempt[] { new(Grpc.StatusCode.OutOfRange) }) }; + yield return new[] { new GrpcRetryTestCase("DataLoss", new GrpcRetryAttempt[] { new(Grpc.StatusCode.DataLoss) }) }; + yield return new[] { new GrpcRetryTestCase("Unavailable", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Unavailable) }) }; - yield return new[] 
{ new GrpcRetryTestCase("OK", new GrpcRetryAttempt[] { new(StatusCode.OK, expectedSuccess: false) }) }; - yield return new[] { new GrpcRetryTestCase("PermissionDenied", new GrpcRetryAttempt[] { new(StatusCode.PermissionDenied, expectedSuccess: false) }) }; - yield return new[] { new GrpcRetryTestCase("Unknown", new GrpcRetryAttempt[] { new(StatusCode.Unknown, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("OK", new GrpcRetryAttempt[] { new(Grpc.StatusCode.OK, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("PermissionDenied", new GrpcRetryAttempt[] { new(Grpc.StatusCode.PermissionDenied, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("Unknown", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Unknown, expectedSuccess: false) }) }; - yield return new[] { new GrpcRetryTestCase("ResourceExhausted w/o RetryInfo", new GrpcRetryAttempt[] { new(StatusCode.ResourceExhausted, expectedSuccess: false) }) }; - yield return new[] { new GrpcRetryTestCase("ResourceExhausted w/ RetryInfo", new GrpcRetryAttempt[] { new(StatusCode.ResourceExhausted, throttleDelay: new Duration { Seconds = 2 }, expectedNextRetryDelayMilliseconds: 3000) }) }; + yield return new[] { new GrpcRetryTestCase("ResourceExhausted w/o RetryInfo", new GrpcRetryAttempt[] { new(Grpc.StatusCode.ResourceExhausted, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("ResourceExhausted w/ RetryInfo", new GrpcRetryAttempt[] { new(Grpc.StatusCode.ResourceExhausted, throttleDelay: GetThrottleDelayString(new Duration { Seconds = 2 }), expectedNextRetryDelayMilliseconds: 3000) }) }; - yield return new[] { new GrpcRetryTestCase("Unavailable w/ RetryInfo", new GrpcRetryAttempt[] { new(StatusCode.Unavailable, throttleDelay: Duration.FromTimeSpan(TimeSpan.FromMilliseconds(2000)), expectedNextRetryDelayMilliseconds: 3000) }) }; + yield return new[] { new GrpcRetryTestCase("Unavailable w/ RetryInfo", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Unavailable, throttleDelay: GetThrottleDelayString(Duration.FromTimeSpan(TimeSpan.FromMilliseconds(2000))), expectedNextRetryDelayMilliseconds: 3000) }) }; - yield return new[] { new GrpcRetryTestCase("Expired deadline", new GrpcRetryAttempt[] { new(StatusCode.Unavailable, deadlineExceeded: true, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("Expired deadline", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Unavailable, deadlineExceeded: true, expectedSuccess: false) }) }; yield return new[] { @@ -143,11 +142,11 @@ public static IEnumerable GetGrpcTestCases() "Exponential backoff", new GrpcRetryAttempt[] { - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3375), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3375), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), }, expectedRetryAttempts: 5), }; @@ -158,11 +157,11 @@ public static IEnumerable GetGrpcTestCases() "Retry until non-retryable status code 
encountered", new GrpcRetryAttempt[] { - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3375), - new(StatusCode.PermissionDenied, expectedSuccess: false), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3375), + new(Grpc.StatusCode.PermissionDenied, expectedSuccess: false), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), }, expectedRetryAttempts: 4), }; @@ -174,15 +173,15 @@ public static IEnumerable GetGrpcTestCases() "Exponential backoff after throttling", new GrpcRetryAttempt[] { - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), - new(StatusCode.Unavailable, throttleDelay: Duration.FromTimeSpan(TimeSpan.FromMilliseconds(500)), expectedNextRetryDelayMilliseconds: 750), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1125), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1688), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2532), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3798), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), - new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), + new(Grpc.StatusCode.Unavailable, throttleDelay: GetThrottleDelayString(Duration.FromTimeSpan(TimeSpan.FromMilliseconds(500))), expectedNextRetryDelayMilliseconds: 750), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1125), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1688), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2532), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3798), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), }, expectedRetryAttempts: 9), }; @@ -193,48 +192,46 @@ public override string ToString() return this.testRunnerName; } - private static Metadata GenerateTrailers(Duration throttleDelay) + private static string GetThrottleDelayString(Duration throttleDelay) { - var metadata = new Metadata(); - - var retryInfo = new Google.Rpc.RetryInfo(); - retryInfo.RetryDelay = throttleDelay; - - var status = new Google.Rpc.Status(); - status.Details.Add(Any.Pack(retryInfo)); - - var stream = new MemoryStream(); - status.WriteTo(stream); + var status = new Google.Rpc.Status + { + Code = 4, + Message = "Only nanos", + Details = + { + Any.Pack(new Google.Rpc.RetryInfo + { + RetryDelay = throttleDelay, + }), + }, + }; - metadata.Add(OtlpRetry.GrpcStatusDetailsHeader, stream.ToArray()); - return metadata; + return Convert.ToBase64String(status.ToByteArray()); } public struct GrpcRetryAttempt { - public TimeSpan? ThrottleDelay; + public string? ThrottleDelay; public int? 
ExpectedNextRetryDelayMilliseconds; public bool ExpectedSuccess; internal ExportClientGrpcResponse Response; - public GrpcRetryAttempt( - StatusCode statusCode, + internal GrpcRetryAttempt( + Grpc.StatusCode statusCode, bool deadlineExceeded = false, - Duration? throttleDelay = null, + string? throttleDelay = null, int expectedNextRetryDelayMilliseconds = 1500, bool expectedSuccess = true) { - var status = new Status(statusCode, "Error"); - var rpcException = throttleDelay != null - ? new RpcException(status, GenerateTrailers(throttleDelay)) - : new RpcException(status); + var status = new Grpc.Status(statusCode, "Error"); // Using arbitrary +1 hr for deadline for test purposes. var deadlineUtc = deadlineExceeded ? DateTime.UtcNow.AddSeconds(-1) : DateTime.UtcNow.AddHours(1); - this.ThrottleDelay = throttleDelay != null ? throttleDelay.ToTimeSpan() : null; + this.ThrottleDelay = throttleDelay; - this.Response = new ExportClientGrpcResponse(expectedSuccess, deadlineUtc, rpcException, null, null); + this.Response = new ExportClientGrpcResponse(expectedSuccess, deadlineUtc, null, status, this.ThrottleDelay); this.ExpectedNextRetryDelayMilliseconds = expectedNextRetryDelayMilliseconds; diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpTraceExporterTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpTraceExporterTests.cs index 6c876dda3d6..94b97ca1702 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpTraceExporterTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpTraceExporterTests.cs @@ -743,10 +743,10 @@ public void UseOpenTelemetryProtocolActivityExporterWithCustomActivityProcessor( [Fact] public void Shutdown_ClientShutdownIsCalled() { - var exportClientMock = new TestProtobufExportClient(); + var exportClientMock = new TestExportClient(); var exporterOptions = new OtlpExporterOptions(); - var transmissionHandler = new ProtobufOtlpExporterTransmissionHandler(exportClientMock, exporterOptions.TimeoutMilliseconds); + var transmissionHandler = new OtlpExporterTransmissionHandler(exportClientMock, exporterOptions.TimeoutMilliseconds); using var exporter = new OtlpTraceExporter(new OtlpExporterOptions(), DefaultSdkLimitOptions, DefaultExperimentalOptions, transmissionHandler); exporter.Shutdown(); diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/TestExportClient.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/TestExportClient.cs index bbecfa2fa37..8879f540210 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/TestExportClient.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/TestExportClient.cs @@ -5,7 +5,7 @@ namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests; -internal class TestExportClient(bool throwException = false) : IExportClient +internal class TestExportClient(bool throwException = false) : IExportClient { public bool SendExportRequestCalled { get; private set; } @@ -13,7 +13,7 @@ internal class TestExportClient(bool throwException = false) : IExportClient< public bool ThrowException { get; set; } = throwException; - public ExportClientResponse SendExportRequest(T request, DateTime deadlineUtc, CancellationToken cancellationToken = default) + public ExportClientResponse SendExportRequest(byte[] buffer, int contentLength, DateTime deadlineUtc, CancellationToken cancellationToken = default) { if (this.ThrowException) { diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/TestProtobufExportClient.cs 
b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/TestProtobufExportClient.cs deleted file mode 100644 index 28fab7ea74c..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/TestProtobufExportClient.cs +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -using OpenTelemetry.Exporter.OpenTelemetryProtocol.Implementation.ExportClient; - -namespace OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests; - -internal class TestProtobufExportClient(bool throwException = false) : IProtobufExportClient -{ - public bool SendExportRequestCalled { get; private set; } - - public bool ShutdownCalled { get; private set; } - - public bool ThrowException { get; set; } = throwException; - - public ExportClientResponse SendExportRequest(byte[] buffer, int contentLength, DateTime deadlineUtc, CancellationToken cancellationToken = default) - { - if (this.ThrowException) - { - throw new Exception("Exception thrown from SendExportRequest"); - } - - this.SendExportRequestCalled = true; - return new TestExportClientResponse(true, deadlineUtc, null); - } - - public bool Shutdown(int timeoutMilliseconds) - { - this.ShutdownCalled = true; - return true; - } - - private class TestExportClientResponse : ExportClientResponse - { - public TestExportClientResponse(bool success, DateTime deadline, Exception? exception) - : base(success, deadline, exception) - { - } - } -} From 8a0beb9f1ae2a571d6b0367f1cd0262f17829a47 Mon Sep 17 00:00:00 2001 From: Rajkumar Rangaraj Date: Thu, 28 Nov 2024 14:10:36 -0800 Subject: [PATCH 05/10] Lint fixes --- .../OtlpRetryTests.cs | 68 +++++++++---------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpRetryTests.cs b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpRetryTests.cs index 3ba577c7301..b380d92d6b0 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpRetryTests.cs +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OtlpRetryTests.cs @@ -118,23 +118,23 @@ private GrpcRetryTestCase(string testRunnerName, GrpcRetryAttempt[] retryAttempt public static IEnumerable GetGrpcTestCases() { - yield return new[] { new GrpcRetryTestCase("Cancelled", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Cancelled) }) }; - yield return new[] { new GrpcRetryTestCase("DeadlineExceeded", new GrpcRetryAttempt[] { new(Grpc.StatusCode.DeadlineExceeded) }) }; - yield return new[] { new GrpcRetryTestCase("Aborted", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Aborted) }) }; - yield return new[] { new GrpcRetryTestCase("OutOfRange", new GrpcRetryAttempt[] { new(Grpc.StatusCode.OutOfRange) }) }; - yield return new[] { new GrpcRetryTestCase("DataLoss", new GrpcRetryAttempt[] { new(Grpc.StatusCode.DataLoss) }) }; - yield return new[] { new GrpcRetryTestCase("Unavailable", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Unavailable) }) }; + yield return new[] { new GrpcRetryTestCase("Cancelled", new GrpcRetryAttempt[] { new(StatusCode.Cancelled) }) }; + yield return new[] { new GrpcRetryTestCase("DeadlineExceeded", new GrpcRetryAttempt[] { new(StatusCode.DeadlineExceeded) }) }; + yield return new[] { new GrpcRetryTestCase("Aborted", new GrpcRetryAttempt[] { new(StatusCode.Aborted) }) }; + yield return new[] { new GrpcRetryTestCase("OutOfRange", new GrpcRetryAttempt[] { new(StatusCode.OutOfRange) }) }; + yield return new[] { new GrpcRetryTestCase("DataLoss", new GrpcRetryAttempt[] { new(StatusCode.DataLoss) }) }; + yield 
return new[] { new GrpcRetryTestCase("Unavailable", new GrpcRetryAttempt[] { new(StatusCode.Unavailable) }) }; - yield return new[] { new GrpcRetryTestCase("OK", new GrpcRetryAttempt[] { new(Grpc.StatusCode.OK, expectedSuccess: false) }) }; - yield return new[] { new GrpcRetryTestCase("PermissionDenied", new GrpcRetryAttempt[] { new(Grpc.StatusCode.PermissionDenied, expectedSuccess: false) }) }; - yield return new[] { new GrpcRetryTestCase("Unknown", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Unknown, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("OK", new GrpcRetryAttempt[] { new(StatusCode.OK, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("PermissionDenied", new GrpcRetryAttempt[] { new(StatusCode.PermissionDenied, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("Unknown", new GrpcRetryAttempt[] { new(StatusCode.Unknown, expectedSuccess: false) }) }; - yield return new[] { new GrpcRetryTestCase("ResourceExhausted w/o RetryInfo", new GrpcRetryAttempt[] { new(Grpc.StatusCode.ResourceExhausted, expectedSuccess: false) }) }; - yield return new[] { new GrpcRetryTestCase("ResourceExhausted w/ RetryInfo", new GrpcRetryAttempt[] { new(Grpc.StatusCode.ResourceExhausted, throttleDelay: GetThrottleDelayString(new Duration { Seconds = 2 }), expectedNextRetryDelayMilliseconds: 3000) }) }; + yield return new[] { new GrpcRetryTestCase("ResourceExhausted w/o RetryInfo", new GrpcRetryAttempt[] { new(StatusCode.ResourceExhausted, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("ResourceExhausted w/ RetryInfo", new GrpcRetryAttempt[] { new(StatusCode.ResourceExhausted, throttleDelay: GetThrottleDelayString(new Duration { Seconds = 2 }), expectedNextRetryDelayMilliseconds: 3000) }) }; - yield return new[] { new GrpcRetryTestCase("Unavailable w/ RetryInfo", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Unavailable, throttleDelay: GetThrottleDelayString(Duration.FromTimeSpan(TimeSpan.FromMilliseconds(2000))), expectedNextRetryDelayMilliseconds: 3000) }) }; + yield return new[] { new GrpcRetryTestCase("Unavailable w/ RetryInfo", new GrpcRetryAttempt[] { new(StatusCode.Unavailable, throttleDelay: GetThrottleDelayString(Duration.FromTimeSpan(TimeSpan.FromMilliseconds(2000))), expectedNextRetryDelayMilliseconds: 3000) }) }; - yield return new[] { new GrpcRetryTestCase("Expired deadline", new GrpcRetryAttempt[] { new(Grpc.StatusCode.Unavailable, deadlineExceeded: true, expectedSuccess: false) }) }; + yield return new[] { new GrpcRetryTestCase("Expired deadline", new GrpcRetryAttempt[] { new(StatusCode.Unavailable, deadlineExceeded: true, expectedSuccess: false) }) }; yield return new[] { @@ -142,11 +142,11 @@ public static IEnumerable GetGrpcTestCases() "Exponential backoff", new GrpcRetryAttempt[] { - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3375), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3375), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(StatusCode.Unavailable, 
expectedNextRetryDelayMilliseconds: 5000), }, expectedRetryAttempts: 5), }; @@ -157,11 +157,11 @@ public static IEnumerable GetGrpcTestCases() "Retry until non-retryable status code encountered", new GrpcRetryAttempt[] { - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3375), - new(Grpc.StatusCode.PermissionDenied, expectedSuccess: false), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3375), + new(StatusCode.PermissionDenied, expectedSuccess: false), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), }, expectedRetryAttempts: 4), }; @@ -173,15 +173,15 @@ public static IEnumerable GetGrpcTestCases() "Exponential backoff after throttling", new GrpcRetryAttempt[] { - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), - new(Grpc.StatusCode.Unavailable, throttleDelay: GetThrottleDelayString(Duration.FromTimeSpan(TimeSpan.FromMilliseconds(500))), expectedNextRetryDelayMilliseconds: 750), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1125), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1688), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2532), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3798), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), - new(Grpc.StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1500), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2250), + new(StatusCode.Unavailable, throttleDelay: GetThrottleDelayString(Duration.FromTimeSpan(TimeSpan.FromMilliseconds(500))), expectedNextRetryDelayMilliseconds: 750), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1125), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 1688), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 2532), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 3798), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), + new(StatusCode.Unavailable, expectedNextRetryDelayMilliseconds: 5000), }, expectedRetryAttempts: 9), }; @@ -218,13 +218,13 @@ public struct GrpcRetryAttempt internal ExportClientGrpcResponse Response; internal GrpcRetryAttempt( - Grpc.StatusCode statusCode, + StatusCode statusCode, bool deadlineExceeded = false, string? throttleDelay = null, int expectedNextRetryDelayMilliseconds = 1500, bool expectedSuccess = true) { - var status = new Grpc.Status(statusCode, "Error"); + var status = new Status(statusCode, "Error"); // Using arbitrary +1 hr for deadline for test purposes. var deadlineUtc = deadlineExceeded ? DateTime.UtcNow.AddSeconds(-1) : DateTime.UtcNow.AddHours(1); From 0599950a79f922cdcf5cd8aa80b438bdd3bf8f2a Mon Sep 17 00:00:00 2001 From: Mikel Blanchard Date: Mon, 2 Dec 2024 13:25:02 -0800 Subject: [PATCH 06/10] Cleanup proto README. 
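For reference, a minimal sketch of the throttle-delay round trip that the OtlpRetryTests changes above rely on: GetThrottleDelayString base64-encodes a google.rpc.Status carrying a RetryInfo detail (mirroring the contents of a grpc-status-details-bin trailer), and the delay can be recovered by parsing that string back. The decode side here is only an illustration of the idea; in the exporter it is handled by GrpcStatusDeserializer.TryGetGrpcRetryDelay, whose implementation is not shown in this patch. Assumes the Google.Protobuf and Google.Api.CommonProtos packages.

    using System;
    using Google.Protobuf;
    using Google.Protobuf.WellKnownTypes;

    internal static class ThrottleDelaySketch
    {
        public static string Encode(Duration retryDelay)
        {
            // Same shape as GetThrottleDelayString in OtlpRetryTests: a google.rpc.Status
            // whose Details carry a RetryInfo, serialized and base64-encoded.
            var status = new Google.Rpc.Status
            {
                Code = 4,
                Message = "throttled",
                Details = { Any.Pack(new Google.Rpc.RetryInfo { RetryDelay = retryDelay }) },
            };

            return Convert.ToBase64String(status.ToByteArray());
        }

        public static TimeSpan? Decode(string grpcStatusDetailsHeader)
        {
            // Parse the base64 google.rpc.Status back and unpack the first RetryInfo detail.
            var status = Google.Rpc.Status.Parser.ParseFrom(Convert.FromBase64String(grpcStatusDetailsHeader));
            foreach (var detail in status.Details)
            {
                if (detail.TryUnpack<Google.Rpc.RetryInfo>(out var retryInfo))
                {
                    return retryInfo.RetryDelay?.ToTimeSpan();
                }
            }

            return null;
        }
    }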
--- .../Implementation/README.md | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/README.md diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/README.md b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/README.md deleted file mode 100644 index d70d44456e2..00000000000 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# OpenTelemetry Protocol Implementation - -`.proto` files under `Implementation\` are copied from the -[`opentelemetry-proto`](https://github.com/open-telemetry/opentelemetry-proto/commit/1a931b4b57c34e7fd8f7dddcaa9b7587840e9c08) -repo. - -Trace proto files marked as stable. From bf4becdb38510cb57e12f983db747d39910013ef Mon Sep 17 00:00:00 2001 From: Mikel Blanchard Date: Mon, 2 Dec 2024 14:05:44 -0800 Subject: [PATCH 07/10] Tweaks. --- .editorconfig | 2 +- OpenTelemetry.sln | 6 + src/Shared/Proto/README.md | 5 + .../Proto}/google/rpc/error_details.proto | 0 .../Shared/Proto}/google/rpc/status.proto | 0 .../opentelemetry/proto/collector/README.md | 0 .../collector/logs/v1/logs_service.proto | 0 .../collector/logs/v1/logs_service_http.yaml | 0 .../metrics/v1/metrics_service.proto | 0 .../metrics/v1/metrics_service_http.yaml | 0 .../v1experimental/profiles_service.proto | 0 .../v1experimental/profiles_service_http.yaml | 0 .../collector/trace/v1/trace_service.proto | 0 .../trace/v1/trace_service_http.yaml | 0 .../proto/common/v1/common.proto | 0 .../opentelemetry/proto/logs/v1/logs.proto | 0 .../proto/metrics/v1/metrics.proto | 0 .../v1experimental/pprofextended.proto | 0 .../profiles/v1experimental/profiles.proto | 0 .../proto/resource/v1/resource.proto | 0 .../opentelemetry/proto/trace/v1/trace.proto | 0 test/Benchmarks/Benchmarks.csproj | 8 +- .../google/rpc/error_details.proto | 37 - .../Implementation/google/rpc/status.proto | 42 -- .../opentelemetry/proto/collector/README.md | 10 - .../collector/logs/v1/logs_service.proto | 79 -- .../collector/logs/v1/logs_service_http.yaml | 9 - .../metrics/v1/metrics_service.proto | 79 -- .../metrics/v1/metrics_service_http.yaml | 9 - .../v1experimental/profiles_service.proto | 78 -- .../v1experimental/profiles_service_http.yaml | 9 - .../collector/trace/v1/trace_service.proto | 79 -- .../trace/v1/trace_service_http.yaml | 9 - .../proto/common/v1/common.proto | 81 -- .../opentelemetry/proto/logs/v1/logs.proto | 211 ------ .../proto/metrics/v1/metrics.proto | 691 ------------------ .../v1experimental/pprofextended.proto | 388 ---------- .../profiles/v1experimental/profiles.proto | 191 ----- .../proto/resource/v1/resource.proto | 37 - .../opentelemetry/proto/trace/v1/trace.proto | 355 --------- ...xporter.OpenTelemetryProtocol.Tests.csproj | 5 +- 41 files changed, 18 insertions(+), 2402 deletions(-) create mode 100644 src/Shared/Proto/README.md rename {test/Benchmarks/Implementation => src/Shared/Proto}/google/rpc/error_details.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/google/rpc/status.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/collector/README.md (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/collector/logs/v1/logs_service.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml (100%) rename {test/Benchmarks/Implementation => 
src/Shared/Proto}/opentelemetry/proto/collector/metrics/v1/metrics_service.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/collector/trace/v1/trace_service.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/common/v1/common.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/logs/v1/logs.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/metrics/v1/metrics.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/profiles/v1experimental/pprofextended.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/profiles/v1experimental/profiles.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/resource/v1/resource.proto (100%) rename {test/Benchmarks/Implementation => src/Shared/Proto}/opentelemetry/proto/trace/v1/trace.proto (100%) delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/error_details.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/status.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/README.md delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/common/v1/common.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/logs/v1/logs.proto delete mode 100644 
test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/metrics/v1/metrics.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/resource/v1/resource.proto delete mode 100644 test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/trace/v1/trace.proto diff --git a/.editorconfig b/.editorconfig index 45d0806a20b..cdfa4e32468 100644 --- a/.editorconfig +++ b/.editorconfig @@ -156,7 +156,7 @@ dotnet_diagnostic.IDE0005.severity = warning # RS0041: Public members should not use oblivious types dotnet_diagnostic.RS0041.severity = suggestion -[obj/**.cs] +[**/obj/**.cs] generated_code = true [*.csproj] diff --git a/OpenTelemetry.sln b/OpenTelemetry.sln index 1b3928c1163..c109bf10753 100644 --- a/OpenTelemetry.sln +++ b/OpenTelemetry.sln @@ -345,6 +345,11 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "scripts", "scripts", "{4498 EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "exemplars", "docs\metrics\exemplars\exemplars.csproj", "{79C12C80-B27B-41FB-AE79-A3BB74CFA782}" EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Proto", "Proto", "{02EA681E-C7D8-13C7-8484-4AC65E1B71E8}" + ProjectSection(SolutionItems) = preProject + src\Shared\Proto\README.md = src\Shared\Proto\README.md + EndProjectSection +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -661,6 +666,7 @@ Global {993E65E5-E71B-40FD-871C-60A9EBD59724} = {A49299FB-C5CD-4E0E-B7E1-B7867BBD67CC} {44982E0D-C8C6-42DC-9F8F-714981F27CE6} = {7CB2F02E-03FA-4FFF-89A5-C51F107623FD} {79C12C80-B27B-41FB-AE79-A3BB74CFA782} = {3277B1C0-BDFE-4460-9B0D-D9A661FB48DB} + {02EA681E-C7D8-13C7-8484-4AC65E1B71E8} = {A49299FB-C5CD-4E0E-B7E1-B7867BBD67CC} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {55639B5C-0770-4A22-AB56-859604650521} diff --git a/src/Shared/Proto/README.md b/src/Shared/Proto/README.md new file mode 100644 index 00000000000..c7b2db9302a --- /dev/null +++ b/src/Shared/Proto/README.md @@ -0,0 +1,5 @@ +# OpenTelemetry Protocol (OTLP) Specification + +`.proto` files are copied from the +[`opentelemetry-proto`](https://github.com/open-telemetry/opentelemetry-proto/commit/1a931b4b57c34e7fd8f7dddcaa9b7587840e9c08) +repo. 
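Projects consume the relocated `.proto` files through their MSBuild proto includes; the Benchmarks.csproj hunk later in this patch repoints its proto root from `Implementation` to `$(RepoRoot)\src\Shared\Proto`. The exact items that project uses are not reproduced in full here, so the sketch below follows the common Grpc.Tools `<Protobuf>` pattern as an assumption of how such a reference might look.

```xml
<!-- Hypothetical sketch only: standard Grpc.Tools <Protobuf> item shape. The real
     Benchmarks.csproj change in this patch points its proto root at
     $(RepoRoot)\src\Shared\Proto; the attribute names here are assumptions. -->
<ItemGroup>
  <Protobuf Include="$(RepoRoot)\src\Shared\Proto\**\*.proto"
            ProtoRoot="$(RepoRoot)\src\Shared\Proto"
            GrpcServices="Client" />
</ItemGroup>
```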
diff --git a/test/Benchmarks/Implementation/google/rpc/error_details.proto b/src/Shared/Proto/google/rpc/error_details.proto similarity index 100% rename from test/Benchmarks/Implementation/google/rpc/error_details.proto rename to src/Shared/Proto/google/rpc/error_details.proto diff --git a/test/Benchmarks/Implementation/google/rpc/status.proto b/src/Shared/Proto/google/rpc/status.proto similarity index 100% rename from test/Benchmarks/Implementation/google/rpc/status.proto rename to src/Shared/Proto/google/rpc/status.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/README.md b/src/Shared/Proto/opentelemetry/proto/collector/README.md similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/README.md rename to src/Shared/Proto/opentelemetry/proto/collector/README.md diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto b/src/Shared/Proto/opentelemetry/proto/collector/logs/v1/logs_service.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto rename to src/Shared/Proto/opentelemetry/proto/collector/logs/v1/logs_service.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml b/src/Shared/Proto/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml rename to src/Shared/Proto/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto b/src/Shared/Proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto rename to src/Shared/Proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml b/src/Shared/Proto/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml rename to src/Shared/Proto/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto b/src/Shared/Proto/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto rename to src/Shared/Proto/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml b/src/Shared/Proto/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml rename to src/Shared/Proto/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto 
b/src/Shared/Proto/opentelemetry/proto/collector/trace/v1/trace_service.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto rename to src/Shared/Proto/opentelemetry/proto/collector/trace/v1/trace_service.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml b/src/Shared/Proto/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml rename to src/Shared/Proto/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/common/v1/common.proto b/src/Shared/Proto/opentelemetry/proto/common/v1/common.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/common/v1/common.proto rename to src/Shared/Proto/opentelemetry/proto/common/v1/common.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/logs/v1/logs.proto b/src/Shared/Proto/opentelemetry/proto/logs/v1/logs.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/logs/v1/logs.proto rename to src/Shared/Proto/opentelemetry/proto/logs/v1/logs.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/metrics/v1/metrics.proto b/src/Shared/Proto/opentelemetry/proto/metrics/v1/metrics.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/metrics/v1/metrics.proto rename to src/Shared/Proto/opentelemetry/proto/metrics/v1/metrics.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto b/src/Shared/Proto/opentelemetry/proto/profiles/v1experimental/pprofextended.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto rename to src/Shared/Proto/opentelemetry/proto/profiles/v1experimental/pprofextended.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto b/src/Shared/Proto/opentelemetry/proto/profiles/v1experimental/profiles.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto rename to src/Shared/Proto/opentelemetry/proto/profiles/v1experimental/profiles.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/resource/v1/resource.proto b/src/Shared/Proto/opentelemetry/proto/resource/v1/resource.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/resource/v1/resource.proto rename to src/Shared/Proto/opentelemetry/proto/resource/v1/resource.proto diff --git a/test/Benchmarks/Implementation/opentelemetry/proto/trace/v1/trace.proto b/src/Shared/Proto/opentelemetry/proto/trace/v1/trace.proto similarity index 100% rename from test/Benchmarks/Implementation/opentelemetry/proto/trace/v1/trace.proto rename to src/Shared/Proto/opentelemetry/proto/trace/v1/trace.proto diff --git a/test/Benchmarks/Benchmarks.csproj b/test/Benchmarks/Benchmarks.csproj index 7e4eb3961e5..cadc5e4ffa5 100644 --- a/test/Benchmarks/Benchmarks.csproj +++ b/test/Benchmarks/Benchmarks.csproj @@ -1,9 +1,9 @@ - + Exe $(TargetFrameworksForTests) - $(NoWarn);SA1518;SA1210 + @@ -36,8 +36,8 @@ - - Implementation + + $(RepoRoot)\src\Shared\Proto diff --git 
a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/error_details.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/error_details.proto deleted file mode 100644 index 0d5461a7287..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/error_details.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.rpc; - -import "google/protobuf/duration.proto"; - -// Describes when the clients can retry a failed request. Clients could ignore -// the recommendation here or retry when this information is missing from error -// responses. -// -// It's always recommended that clients should use exponential backoff when -// retrying. -// -// Clients should wait until `retry_delay` amount of time has passed since -// receiving the error response before retrying. If retrying requests also -// fail, clients should use an exponential backoff scheme to gradually increase -// the delay between retries based on `retry_delay`, until either a maximum -// number of retries have been reached or a maximum retry delay cap has been -// reached. -message RetryInfo { - // Clients should wait at least this long between retrying the same request. - google.protobuf.Duration retry_delay = 1; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/status.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/status.proto deleted file mode 100644 index 40c5bf318c2..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/google/rpc/status.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.rpc; - -import "google/protobuf/any.proto"; - -// The `Status` type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). Each `Status` message contains -// three pieces of data: error code, error message, and error details. -// -// You can find out more about this error model and how to work with it in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). 
-message Status { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. - int32 code = 1; - - // A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. - string message = 2; - - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. - repeated google.protobuf.Any details = 3; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/README.md b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/README.md deleted file mode 100644 index f82dbb0278b..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# OpenTelemetry Collector Proto - -This package describes the OpenTelemetry collector protocol. - -## Packages - -1. `common` package contains the common messages shared between different services. -2. `trace` package contains the Trace Service protos. -3. `metrics` package contains the Metrics Service protos. -4. `logs` package contains the Logs Service protos. diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto deleted file mode 100644 index 8260d8aaeb8..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.logs.v1; - -import "opentelemetry/proto/logs/v1/logs.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Logs.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.logs.v1"; -option java_outer_classname = "LogsServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/logs/v1"; - -// Service that can be used to push logs between one Application instrumented with -// OpenTelemetry and an collector, or between an collector and a central collector (in this -// case logs are sent/received to/from multiple Applications). -service LogsService { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - rpc Export(ExportLogsServiceRequest) returns (ExportLogsServiceResponse) {} -} - -message ExportLogsServiceRequest { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain one - // element. 
Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.logs.v1.ResourceLogs resource_logs = 1; -} - -message ExportLogsServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportLogsPartialSuccess partial_success = 1; -} - -message ExportLogsPartialSuccess { - // The number of rejected log records. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_log_records = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml deleted file mode 100644 index 507473b9b3b..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/logs/v1/logs_service_http.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the -# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. -type: google.api.Service -config_version: 3 -http: - rules: - - selector: opentelemetry.proto.collector.logs.v1.LogsService.Export - post: /v1/logs - body: "*" \ No newline at end of file diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto deleted file mode 100644 index dd48f1ad3a1..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.metrics.v1; - -import "opentelemetry/proto/metrics/v1/metrics.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Metrics.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.metrics.v1"; -option java_outer_classname = "MetricsServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/metrics/v1"; - -// Service that can be used to push metrics between one Application -// instrumented with OpenTelemetry and a collector, or between a collector and a -// central collector. -service MetricsService { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - rpc Export(ExportMetricsServiceRequest) returns (ExportMetricsServiceResponse) {} -} - -message ExportMetricsServiceRequest { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.metrics.v1.ResourceMetrics resource_metrics = 1; -} - -message ExportMetricsServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportMetricsPartialSuccess partial_success = 1; -} - -message ExportMetricsPartialSuccess { - // The number of rejected data points. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_data_points = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. 
- string error_message = 2; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml deleted file mode 100644 index a5456502607..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/metrics/v1/metrics_service_http.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the -# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. -type: google.api.Service -config_version: 3 -http: - rules: - - selector: opentelemetry.proto.collector.metrics.v1.MetricsService.Export - post: /v1/metrics - body: "*" \ No newline at end of file diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto deleted file mode 100644 index d0e7894b29c..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.profiles.v1experimental; - -import "opentelemetry/proto/profiles/v1experimental/profiles.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Experimental"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.profiles.v1experimental"; -option java_outer_classname = "ProfilesServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental"; - -// Service that can be used to push profiles between one Application instrumented with -// OpenTelemetry and a collector, or between a collector and a central collector. -service ProfilesService { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - rpc Export(ExportProfilesServiceRequest) returns (ExportProfilesServiceResponse) {} -} - -message ExportProfilesServiceRequest { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. 
- repeated opentelemetry.proto.profiles.v1experimental.ResourceProfiles resource_profiles = 1; -} - -message ExportProfilesServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportProfilesPartialSuccess partial_success = 1; -} - -message ExportProfilesPartialSuccess { - // The number of rejected profiles. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_profiles = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. - string error_message = 2; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml deleted file mode 100644 index ea501afbbb2..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/profiles/v1experimental/profiles_service_http.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the -# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. -type: google.api.Service -config_version: 3 -http: - rules: - - selector: opentelemetry.proto.collector.profiles.v1experimental.ProfilesService.Export - post: /v1experimental/profiles - body: "*" diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto deleted file mode 100644 index d6fe67f9e55..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.collector.trace.v1; - -import "opentelemetry/proto/trace/v1/trace.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Collector.Trace.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.trace.v1"; -option java_outer_classname = "TraceServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/trace/v1"; - -// Service that can be used to push spans between one Application instrumented with -// OpenTelemetry and a collector, or between a collector and a central collector (in this -// case spans are sent/received to/from multiple Applications). -service TraceService { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - rpc Export(ExportTraceServiceRequest) returns (ExportTraceServiceResponse) {} -} - -message ExportTraceServiceRequest { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain one - // element. Intermediary nodes (such as OpenTelemetry Collector) that receive - // data from multiple origins typically batch the data before forwarding further and - // in that case this array will contain multiple elements. - repeated opentelemetry.proto.trace.v1.ResourceSpans resource_spans = 1; -} - -message ExportTraceServiceResponse { - // The details of a partially successful export request. - // - // If the request is only partially accepted - // (i.e. when the server accepts only parts of the data and rejects the rest) - // the server MUST initialize the `partial_success` field and MUST - // set the `rejected_` with the number of items it rejected. - // - // Servers MAY also make use of the `partial_success` field to convey - // warnings/suggestions to senders even when the request was fully accepted. - // In such cases, the `rejected_` MUST have a value of `0` and - // the `error_message` MUST be non-empty. - // - // A `partial_success` message with an empty value (rejected_ = 0 and - // `error_message` = "") is equivalent to it not being set/present. Senders - // SHOULD interpret it the same way as in the full success case. - ExportTracePartialSuccess partial_success = 1; -} - -message ExportTracePartialSuccess { - // The number of rejected spans. - // - // A `rejected_` field holding a `0` value indicates that the - // request was fully accepted. - int64 rejected_spans = 1; - - // A developer-facing human-readable message in English. It should be used - // either to explain why the server rejected parts of the data during a partial - // success or to convey warnings/suggestions during a full success. The message - // should offer guidance on how users can address such issues. - // - // error_message is an optional field. An error_message with an empty value - // is equivalent to it not being set. 
- string error_message = 2; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml deleted file mode 100644 index d091b3a8d53..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/collector/trace/v1/trace_service_http.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# This is an API configuration to generate an HTTP/JSON -> gRPC gateway for the -# OpenTelemetry service using github.com/grpc-ecosystem/grpc-gateway. -type: google.api.Service -config_version: 3 -http: - rules: - - selector: opentelemetry.proto.collector.trace.v1.TraceService.Export - post: /v1/traces - body: "*" diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/common/v1/common.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/common/v1/common.proto deleted file mode 100644 index ff8a21a1fa0..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/common/v1/common.proto +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.common.v1; - -option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.common.v1"; -option java_outer_classname = "CommonProto"; -option go_package = "go.opentelemetry.io/proto/otlp/common/v1"; - -// AnyValue is used to represent any type of attribute value. AnyValue may contain a -// primitive value such as a string or integer or it may contain an arbitrary nested -// object containing arrays, key-value lists and primitives. -message AnyValue { - // The value is one of the listed fields. It is valid for all values to be unspecified - // in which case this AnyValue is considered to be "empty". - oneof value { - string string_value = 1; - bool bool_value = 2; - int64 int_value = 3; - double double_value = 4; - ArrayValue array_value = 5; - KeyValueList kvlist_value = 6; - bytes bytes_value = 7; - } -} - -// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message -// since oneof in AnyValue does not allow repeated fields. -message ArrayValue { - // Array of values. The array may be empty (contain 0 elements). - repeated AnyValue values = 1; -} - -// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message -// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need -// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to -// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches -// are semantically equivalent. 
-message KeyValueList { - // A collection of key/value pairs of key-value pairs. The list may be empty (may - // contain 0 elements). - // The keys MUST be unique (it is not allowed to have more than one - // value with the same key). - repeated KeyValue values = 1; -} - -// KeyValue is a key-value pair that is used to store Span attributes, Link -// attributes, etc. -message KeyValue { - string key = 1; - AnyValue value = 2; -} - -// InstrumentationScope is a message representing the instrumentation scope information -// such as the fully qualified name and version. -message InstrumentationScope { - // An empty instrumentation scope name means the name is unknown. - string name = 1; - string version = 2; - - // Additional attributes that describe the scope. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated KeyValue attributes = 3; - uint32 dropped_attributes_count = 4; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/logs/v1/logs.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/logs/v1/logs.proto deleted file mode 100644 index f9b97dd7451..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/logs/v1/logs.proto +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.logs.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Logs.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.logs.v1"; -option java_outer_classname = "LogsProto"; -option go_package = "go.opentelemetry.io/proto/otlp/logs/v1"; - -// LogsData represents the logs data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP logs data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message LogsData { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceLogs resource_logs = 1; -} - -// A collection of ScopeLogs from a Resource. -message ResourceLogs { - reserved 1000; - - // The resource for the logs in this message. - // If this field is not set then resource info is unknown. 
- opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeLogs that originate from a resource. - repeated ScopeLogs scope_logs = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_logs" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Logs produced by a Scope. -message ScopeLogs { - // The instrumentation scope information for the logs in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of log records. - repeated LogRecord log_records = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the log data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all logs in the "logs" field. - string schema_url = 3; -} - -// Possible values for LogRecord.SeverityNumber. -enum SeverityNumber { - // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. - SEVERITY_NUMBER_UNSPECIFIED = 0; - SEVERITY_NUMBER_TRACE = 1; - SEVERITY_NUMBER_TRACE2 = 2; - SEVERITY_NUMBER_TRACE3 = 3; - SEVERITY_NUMBER_TRACE4 = 4; - SEVERITY_NUMBER_DEBUG = 5; - SEVERITY_NUMBER_DEBUG2 = 6; - SEVERITY_NUMBER_DEBUG3 = 7; - SEVERITY_NUMBER_DEBUG4 = 8; - SEVERITY_NUMBER_INFO = 9; - SEVERITY_NUMBER_INFO2 = 10; - SEVERITY_NUMBER_INFO3 = 11; - SEVERITY_NUMBER_INFO4 = 12; - SEVERITY_NUMBER_WARN = 13; - SEVERITY_NUMBER_WARN2 = 14; - SEVERITY_NUMBER_WARN3 = 15; - SEVERITY_NUMBER_WARN4 = 16; - SEVERITY_NUMBER_ERROR = 17; - SEVERITY_NUMBER_ERROR2 = 18; - SEVERITY_NUMBER_ERROR3 = 19; - SEVERITY_NUMBER_ERROR4 = 20; - SEVERITY_NUMBER_FATAL = 21; - SEVERITY_NUMBER_FATAL2 = 22; - SEVERITY_NUMBER_FATAL3 = 23; - SEVERITY_NUMBER_FATAL4 = 24; -} - -// LogRecordFlags represents constants used to interpret the -// LogRecord.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. To extract the bit-field, for example, use an -// expression like: -// -// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) -// -enum LogRecordFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - LOG_RECORD_FLAGS_DO_NOT_USE = 0; - - // Bits 0-7 are used for trace flags. - LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; - - // Bits 8-31 are reserved for future use. -} - -// A log record according to OpenTelemetry Log Data Model: -// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md -message LogRecord { - reserved 4; - - // time_unix_nano is the time when the event occurred. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - fixed64 time_unix_nano = 1; - - // Time when the event was observed by the collection system. - // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) - // this timestamp is typically set at the generation time and is equal to Timestamp. 
- // For events originating externally and collected by OpenTelemetry (e.g. using - // Collector) this is the time when OpenTelemetry's code observed the event measured - // by the clock of the OpenTelemetry code. This field MUST be set once the event is - // observed by OpenTelemetry. - // - // For converting OpenTelemetry log data to formats that support only one timestamp or - // when receiving OpenTelemetry log data by recipients that support only one timestamp - // internally the following logic is recommended: - // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - fixed64 observed_time_unix_nano = 11; - - // Numerical value of the severity, normalized to values described in Log Data Model. - // [Optional]. - SeverityNumber severity_number = 2; - - // The severity text (also known as log level). The original string representation as - // it is known at the source. [Optional]. - string severity_text = 3; - - // A value containing the body of the log record. Can be for example a human-readable - // string message (including multi-line) describing the event in a free form or it can - // be a structured data composed of arrays and maps of other values. [Optional]. - opentelemetry.proto.common.v1.AnyValue body = 5; - - // Additional attributes that describe the specific event occurrence. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 6; - uint32 dropped_attributes_count = 7; - - // Flags, a bit field. 8 least significant bits are the trace flags as - // defined in W3C Trace Context specification. 24 most significant bits are reserved - // and must be set to 0. Readers must not assume that 24 most significant bits - // will be zero and must correctly mask the bits when reading 8-bit trace flag (use - // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. - fixed32 flags = 8; - - // A unique identifier for a trace. All logs from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. - // - // The receivers SHOULD assume that the log record is not associated with a - // trace if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. - bytes trace_id = 9; - - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. If the sender specifies a valid span_id then it SHOULD also - // specify a valid trace_id. - // - // The receivers SHOULD assume that the log record is not associated with a - // span if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. 
- bytes span_id = 10; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/metrics/v1/metrics.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/metrics/v1/metrics.proto deleted file mode 100644 index 19bb7ff8d53..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/metrics/v1/metrics.proto +++ /dev/null @@ -1,691 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.metrics.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.metrics.v1"; -option java_outer_classname = "MetricsProto"; -option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1"; - -// MetricsData represents the metrics data that can be stored in a persistent -// storage, OR can be embedded by other protocols that transfer OTLP metrics -// data but do not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message MetricsData { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceMetrics resource_metrics = 1; -} - -// A collection of ScopeMetrics from a Resource. -message ResourceMetrics { - reserved 1000; - - // The resource for the metrics in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of metrics that originate from a resource. - repeated ScopeMetrics scope_metrics = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_metrics" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Metrics produced by an Scope. -message ScopeMetrics { - // The instrumentation scope information for the metrics in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). 
- opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of metrics that originate from an instrumentation library. - repeated Metric metrics = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all metrics in the "metrics" field. - string schema_url = 3; -} - -// Defines a Metric which has one or more timeseries. The following is a -// brief summary of the Metric data model. For more details, see: -// -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md -// -// -// The data model and relation between entities is shown in the -// diagram below. Here, "DataPoint" is the term used to refer to any -// one of the specific data point value types, and "points" is the term used -// to refer to any one of the lists of points contained in the Metric. -// -// - Metric is composed of a metadata and data. -// - Metadata part contains a name, description, unit. -// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). -// - DataPoint contains timestamps, attributes, and one of the possible value type -// fields. -// -// Metric -// +------------+ -// |name | -// |description | -// |unit | +------------------------------------+ -// |data |---> |Gauge, Sum, Histogram, Summary, ... | -// +------------+ +------------------------------------+ -// -// Data [One of Gauge, Sum, Histogram, Summary, ...] -// +-----------+ -// |... | // Metadata about the Data. -// |points |--+ -// +-----------+ | -// | +---------------------------+ -// | |DataPoint 1 | -// v |+------+------+ +------+ | -// +-----+ ||label |label |...|label | | -// | 1 |-->||value1|value2|...|valueN| | -// +-----+ |+------+------+ +------+ | -// | . | |+-----+ | -// | . | ||value| | -// | . | |+-----+ | -// | . | +---------------------------+ -// | . | . -// | . | . -// | . | . -// | . | +---------------------------+ -// | . | |DataPoint M | -// +-----+ |+------+------+ +------+ | -// | M |-->||label |label |...|label | | -// +-----+ ||value1|value2|...|valueN| | -// |+------+------+ +------+ | -// |+-----+ | -// ||value| | -// |+-----+ | -// +---------------------------+ -// -// Each distinct type of DataPoint represents the output of a specific -// aggregation function, the result of applying the DataPoint's -// associated function of to one or more measurements. -// -// All DataPoint types have three common fields: -// - Attributes includes key-value pairs associated with the data point -// - TimeUnixNano is required, set to the end time of the aggregation -// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints -// having an AggregationTemporality field, as discussed below. -// -// Both TimeUnixNano and StartTimeUnixNano values are expressed as -// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. -// -// # TimeUnixNano -// -// This field is required, having consistent interpretation across -// DataPoint types. TimeUnixNano is the moment corresponding to when -// the data point's aggregate value was captured. -// -// Data points with the 0 value for TimeUnixNano SHOULD be rejected -// by consumers. -// -// # StartTimeUnixNano -// -// StartTimeUnixNano in general allows detecting when a sequence of -// observations is unbroken. 
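Since every data point expresses TimeUnixNano and StartTimeUnixNano as UNIX epoch nanoseconds, a small conversion helper is usually the first thing a producer or consumer writes. A C# sketch under that convention (one .NET tick is 100 ns); the type and method names are illustrative only.

    using System;

    // Converts between DateTimeOffset and the fixed64 "nanoseconds since the UNIX epoch"
    // convention used by TimeUnixNano / StartTimeUnixNano.
    internal static class UnixNanoTime
    {
        public static ulong ToUnixNanoseconds(DateTimeOffset value)
            => (ulong)(value - DateTimeOffset.UnixEpoch).Ticks * 100;

        public static DateTimeOffset FromUnixNanoseconds(ulong unixNanos)
            => DateTimeOffset.UnixEpoch.AddTicks((long)(unixNanos / 100));
    }

Note that a data point whose TimeUnixNano is 0 should be rejected by consumers, so 0 is best treated as "unset" rather than as the epoch itself.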
This field indicates to consumers the -// start time for points with cumulative and delta -// AggregationTemporality, and it should be included whenever possible -// to support correct rate calculation. Although it may be omitted -// when the start time is truly unknown, setting StartTimeUnixNano is -// strongly encouraged. -message Metric { - reserved 4, 6, 8; - - // name of the metric. - string name = 1; - - // description of the metric, which can be used in documentation. - string description = 2; - - // unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. - string unit = 3; - - // Data determines the aggregation type (if any) of the metric, what is the - // reported value type for the data points, as well as the relatationship to - // the time interval over which they are reported. - oneof data { - Gauge gauge = 5; - Sum sum = 7; - Histogram histogram = 9; - ExponentialHistogram exponential_histogram = 10; - Summary summary = 11; - } - - // Additional metadata attributes that describe the metric. [Optional]. - // Attributes are non-identifying. - // Consumers SHOULD NOT need to be aware of these attributes. - // These attributes MAY be used to encode information allowing - // for lossless roundtrip translation to / from another data model. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue metadata = 12; -} - -// Gauge represents the type of a scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. -message Gauge { - repeated NumberDataPoint data_points = 1; -} - -// Sum represents the type of a scalar metric that is calculated as a sum of all -// reported measurements over a time interval. -message Sum { - repeated NumberDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; - - // If "true" means that the sum is monotonic. - bool is_monotonic = 3; -} - -// Histogram represents the type of a metric that is calculated by aggregating -// as a Histogram of all reported measurements over a time interval. -message Histogram { - repeated HistogramDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; -} - -// ExponentialHistogram represents the type of a metric that is calculated by aggregating -// as a ExponentialHistogram of all reported double measurements over a time interval. -message ExponentialHistogram { - repeated ExponentialHistogramDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. 
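Because the metric payload lives in the `data` oneof, consumers typically dispatch on which member is set before touching any points. A sketch of that dispatch, assuming the C# bindings protoc generates from this file (namespace OpenTelemetry.Proto.Metrics.V1, with the oneof surfaced as DataCase/DataOneofCase); not part of this diff.

    using OpenTelemetry.Proto.Metrics.V1;

    // Counts data points by switching on the `data` oneof of a generated Metric.
    internal static class MetricDataDispatch
    {
        public static int CountDataPoints(Metric metric) => metric.DataCase switch
        {
            Metric.DataOneofCase.Gauge => metric.Gauge.DataPoints.Count,
            Metric.DataOneofCase.Sum => metric.Sum.DataPoints.Count,
            Metric.DataOneofCase.Histogram => metric.Histogram.DataPoints.Count,
            Metric.DataOneofCase.ExponentialHistogram => metric.ExponentialHistogram.DataPoints.Count,
            Metric.DataOneofCase.Summary => metric.Summary.DataPoints.Count,
            _ => 0, // DataOneofCase.None: no data set on this metric
        };
    }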
- AggregationTemporality aggregation_temporality = 2; -} - -// Summary metric data are used to convey quantile summaries, -// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) -// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) -// data type. These data points cannot always be merged in a meaningful way. -// While they can be useful in some applications, histogram data points are -// recommended for new applications. -message Summary { - repeated SummaryDataPoint data_points = 1; -} - -// AggregationTemporality defines how a metric aggregator reports aggregated -// values. It describes how those values relate to the time interval over -// which they are aggregated. -enum AggregationTemporality { - // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; - - // DELTA is an AggregationTemporality for a metric aggregator which reports - // changes since last report time. Successive metrics contain aggregation of - // values from continuous and non-overlapping intervals. - // - // The values for a DELTA metric are based only on the time interval - // associated with one measurement cycle. There is no dependency on - // previous measurements like is the case for CUMULATIVE metrics. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // DELTA metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0+1 to - // t_0+2 with a value of 2. - AGGREGATION_TEMPORALITY_DELTA = 1; - - // CUMULATIVE is an AggregationTemporality for a metric aggregator which - // reports changes since a fixed start time. This means that current values - // of a CUMULATIVE metric depend on all previous measurements since the - // start time. Because of this, the sender is required to retain this state - // in some form. If this state is lost or invalidated, the CUMULATIVE metric - // values MUST be reset and a new fixed start time following the last - // reported measurement time sent MUST be used. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // CUMULATIVE metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. 
The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+2 with a value of 5. - // 9. The system experiences a fault and loses state. - // 10. The system recovers and resumes receiving at time=t_1. - // 11. A request is received, the system measures 1 request. - // 12. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_1 to - // t_0+1 with a value of 1. - // - // Note: Even though, when reporting changes since last report time, using - // CUMULATIVE is valid, it is not recommended. This may cause problems for - // systems that do not use start_time to determine when the aggregation - // value was reset (e.g. Prometheus). - AGGREGATION_TEMPORALITY_CUMULATIVE = 2; -} - -// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a -// bit-field representing 32 distinct boolean flags. Each flag defined in this -// enum is a bit-mask. To test the presence of a single flag in the flags of -// a data point, for example, use an expression like: -// -// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK -// -enum DataPointFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - DATA_POINT_FLAGS_DO_NOT_USE = 0; - - // This DataPoint is valid but has no recorded value. This value - // SHOULD be used to reflect explicitly missing data in a series, as - // for an equivalent to the Prometheus "staleness marker". - DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1; - - // Bits 2-31 are reserved for future use. -} - -// NumberDataPoint is a single data point in a timeseries that describes the -// time-varying scalar value of a metric. -message NumberDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // The value itself. A point is considered invalid when one of the recognized - // value fields is not present inside this oneof. - oneof value { - double as_double = 4; - sfixed64 as_int = 6; - } - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 5; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 8; -} - -// HistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Histogram. A Histogram contains summary statistics -// for a population of values, it may optionally contain the distribution of -// those values across a set of buckets. 
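The DataPointFlags comment above gives the bit test verbatim; in C# it is a one-liner. The mask value 1 corresponds to DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK, and the helper name is illustrative.

    // Bit test for the "no recorded value" flag, mirroring the expression in the
    // DataPointFlags comment: (flags & mask) == mask.
    internal static class DataPointFlagsHelper
    {
        private const uint NoRecordedValueMask = 0x1; // DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK

        public static bool HasNoRecordedValue(uint flags)
            => (flags & NoRecordedValueMask) == NoRecordedValueMask;
    }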
-// -// If the histogram contains the distribution of values, then both -// "explicit_bounds" and "bucket counts" fields must be defined. -// If the histogram does not contain the distribution of values, then both -// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and -// "sum" are known. -message HistogramDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be non-negative. This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram - optional double sum = 5; - - // bucket_counts is an optional field contains the count values of histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in bucket_counts array must be by one greater than - // the number of elements in explicit_bounds array. - repeated fixed64 bucket_counts = 6; - - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // - // The boundaries for bucket at index i are: - // - // (-infinity, explicit_bounds[i]] for i == 0 - // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) - // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) - // - // The values in the explicit_bounds array must be strictly increasing. - // - // Histogram buckets are inclusive of their upper boundary, except the last - // bucket where the boundary is at infinity. This format is intentionally - // compatible with the OpenMetrics histogram definition. - repeated double explicit_bounds = 7; - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 8; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 10; - - // min is the minimum value over (start_time, end_time]. - optional double min = 11; - - // max is the maximum value over (start_time, end_time]. - optional double max = 12; -} - -// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a ExponentialHistogram of double values. 
A ExponentialHistogram contains -// summary statistics for a population of values, it may optionally contain the -// distribution of those values across a set of buckets. -// -message ExponentialHistogramDataPoint { - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be - // non-negative. This value must be equal to the sum of the "bucket_counts" - // values in the positive and negative Buckets plus the "zero_count" field. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram - optional double sum = 5; - - // scale describes the resolution of the histogram. Boundaries are - // located at powers of the base, where: - // - // base = (2^(2^-scale)) - // - // The histogram bucket identified by `index`, a signed integer, - // contains values that are greater than (base^index) and - // less than or equal to (base^(index+1)). - // - // The positive and negative ranges of the histogram are expressed - // separately. Negative values are mapped by their absolute value - // into the negative range using the same scale as the positive range. - // - // scale is not restricted by the protocol, as the permissible - // values depend on the range of the data. - sint32 scale = 6; - - // zero_count is the count of values that are either exactly zero or - // within the region considered zero by the instrumentation at the - // tolerated degree of precision. This bucket stores values that - // cannot be expressed using the standard exponential formula as - // well as values that have been rounded to zero. - // - // Implementations MAY consider the zero bucket to have probability - // mass equal to (zero_count / count). - fixed64 zero_count = 7; - - // positive carries the positive range of exponential bucket counts. - Buckets positive = 8; - - // negative carries the negative range of exponential bucket counts. - Buckets negative = 9; - - // Buckets are a set of bucket counts, encoded in a contiguous array - // of counts. - message Buckets { - // Offset is the bucket index of the first entry in the bucket_counts array. - // - // Note: This uses a varint encoding as a simple form of compression. - sint32 offset = 1; - - // bucket_counts is an array of count values, where bucket_counts[i] carries - // the count of the bucket at index (offset+i). 
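From the definitions above (base = 2^(2^-scale); bucket `index` holds values in (base^index, base^(index+1)]), the bucket index for a positive value can be computed directly. This sketch applies the formula literally; production code usually replaces the logarithms with bit manipulation, and the names here are illustrative.

    using System;

    // Computes the exponential-histogram bucket index for a positive value at a given scale.
    internal static class ExponentialBucketHelper
    {
        public static int GetBucketIndex(double value, int scale)
        {
            if (value <= 0)
            {
                throw new ArgumentOutOfRangeException(nameof(value), "Only positive values map to the positive range.");
            }

            // ln(base) = 2^-scale * ln(2), so log_base(value) = ln(value) / ln(base).
            double lnBase = Math.Pow(2, -scale) * Math.Log(2);

            // Smallest index i with value <= base^(i+1), i.e. ceil(log_base(value)) - 1.
            return (int)Math.Ceiling(Math.Log(value) / lnBase) - 1;
        }
    }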
bucket_counts[i] is the count - // of values greater than base^(offset+i) and less than or equal to - // base^(offset+i+1). - // - // Note: By contrast, the explicit HistogramDataPoint uses - // fixed64. This field is expected to have many buckets, - // especially zeros, so uint64 has been selected to ensure - // varint encoding. - repeated uint64 bucket_counts = 2; - } - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 10; - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 11; - - // min is the minimum value over (start_time, end_time]. - optional double min = 12; - - // max is the maximum value over (start_time, end_time]. - optional double max = 13; - - // ZeroThreshold may be optionally set to convey the width of the zero - // region. Where the zero region is defined as the closed interval - // [-ZeroThreshold, ZeroThreshold]. - // When ZeroThreshold is 0, zero count bucket stores values that cannot be - // expressed using the standard exponential formula as well as values that - // have been rounded to zero. - double zero_threshold = 14; -} - -// SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. -message SummaryDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be non-negative. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary - double sum = 5; - - // Represents the value at a given quantile of a distribution. - // - // To record Min and Max values following conventions are used: - // - The 1.0 quantile is equivalent to the maximum value observed. - // - The 0.0 quantile is equivalent to the minimum value observed. - // - // See the following issue for more context: - // https://github.com/open-telemetry/opentelemetry-proto/issues/125 - message ValueAtQuantile { - // The quantile of a distribution. Must be in the interval - // [0.0, 1.0]. - double quantile = 1; - - // The value at the given quantile of a distribution. - // - // Quantile values must NOT be negative. 
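Given the ValueAtQuantile conventions above (the 0.0 quantile carries the observed minimum, the 1.0 quantile the maximum), recovering min and max from a summary point is a simple scan. The (Quantile, Value) tuples below stand in for the repeated quantile_values entries; names are illustrative and not part of this diff.

    using System.Collections.Generic;
    using System.Linq;

    // Reads the min/max conventions out of a summary's quantile_values list.
    internal static class SummaryQuantileHelper
    {
        public static double? TryGetMin(IEnumerable<(double Quantile, double Value)> quantileValues)
            => quantileValues.Where(q => q.Quantile == 0.0).Select(q => (double?)q.Value).FirstOrDefault();

        public static double? TryGetMax(IEnumerable<(double Quantile, double Value)> quantileValues)
            => quantileValues.Where(q => q.Quantile == 1.0).Select(q => (double?)q.Value).FirstOrDefault();
    }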
- double value = 2; - } - - // (Optional) list of values at different quantiles of the distribution calculated - // from the current snapshot. The quantiles must be strictly increasing. - repeated ValueAtQuantile quantile_values = 6; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 8; -} - -// A representation of an exemplar, which is a sample input measurement. -// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -message Exemplar { - reserved 1; - - // The set of key/value pairs that were filtered out by the aggregator, but - // recorded alongside the original measurement. Only key/value pairs that were - // filtered out by the aggregator should be included - repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7; - - // time_unix_nano is the exact time when this exemplar was recorded - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 2; - - // The value of the measurement that was recorded. An exemplar is - // considered invalid when one of the recognized value fields is not present - // inside this oneof. - oneof value { - double as_double = 3; - sfixed64 as_int = 6; - } - - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - bytes span_id = 4; - - // (Optional) Trace ID of the exemplar trace. - // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - bytes trace_id = 5; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto deleted file mode 100644 index b5b5b88fcef..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/pprofextended.proto +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// This file includes work covered by the following copyright and permission notices: -// -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Profile is a common stacktrace profile format. -// -// Measurements represented with this format should follow the -// following conventions: -// -// - Consumers should treat unset optional fields as if they had been -// set with their default value. -// -// - When possible, measurements should be stored in "unsampled" form -// that is most useful to humans. There should be enough -// information present to determine the original sampled values. -// -// - On-disk, the serialized proto must be gzip-compressed. -// -// - The profile is represented as a set of samples, where each sample -// references a sequence of locations, and where each location belongs -// to a mapping. -// - There is a N->1 relationship from sample.location_id entries to -// locations. For every sample.location_id entry there must be a -// unique Location with that index. -// - There is an optional N->1 relationship from locations to -// mappings. For every nonzero Location.mapping_id there must be a -// unique Mapping with that index. - -syntax = "proto3"; - -package opentelemetry.proto.profiles.v1experimental; - -import "opentelemetry/proto/common/v1/common.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.profiles.v1experimental"; -option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; - -// Represents a complete profile, including sample types, samples, -// mappings to binaries, locations, functions, string table, and additional metadata. -message Profile { - // A description of the samples associated with each Sample.value. - // For a cpu profile this might be: - // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] - // For a heap profile, this might be: - // [["allocations","count"], ["space","bytes"]], - // If one of the values represents the number of events represented - // by the sample, by convention it should be at index 0 and use - // sample_type.unit == "count". - repeated ValueType sample_type = 1; - // The set of samples recorded in this profile. - repeated Sample sample = 2; - // Mapping from address ranges to the image/binary/library mapped - // into that address range. mapping[0] will be the main binary. - repeated Mapping mapping = 3; - // Locations referenced by samples via location_indices. - repeated Location location = 4; - // Array of locations referenced by samples. - repeated int64 location_indices = 15; - // Functions referenced by locations. - repeated Function function = 5; - // Lookup table for attributes. - repeated opentelemetry.proto.common.v1.KeyValue attribute_table = 16; - // Represents a mapping between Attribute Keys and Units. - repeated AttributeUnit attribute_units = 17; - // Lookup table for links. - repeated Link link_table = 18; - // A common table for strings referenced by various messages. - // string_table[0] must always be "". - repeated string string_table = 6; - // frames with Function.function_name fully matching the following - // regexp will be dropped from the samples, along with their successors. - int64 drop_frames = 7; // Index into string table. - // frames with Function.function_name fully matching the following - // regexp will be kept, even if it matches drop_frames. - int64 keep_frames = 8; // Index into string table. 
- - // The following fields are informational, do not affect - // interpretation of results. - - // Time of collection (UTC) represented as nanoseconds past the epoch. - int64 time_nanos = 9; - // Duration of the profile, if a duration makes sense. - int64 duration_nanos = 10; - // The kind of events between sampled occurrences. - // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] - ValueType period_type = 11; - // The number of events between sampled occurrences. - int64 period = 12; - // Free-form text associated with the profile. The text is displayed as is - // to the user by the tools that read profiles (e.g. by pprof). This field - // should not be used to store any machine-readable information, it is only - // for human-friendly content. The profile must stay functional if this field - // is cleaned. - repeated int64 comment = 13; // Indices into string table. - // Index into the string table of the type of the preferred sample - // value. If unset, clients should default to the last sample value. - int64 default_sample_type = 14; -} - -// Represents a mapping between Attribute Keys and Units. -message AttributeUnit { - // Index into string table. - int64 attribute_key = 1; - // Index into string table. - int64 unit = 2; -} - -// A pointer from a profile Sample to a trace Span. -// Connects a profile sample to a trace span, identified by unique trace and span IDs. -message Link { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - bytes trace_id = 1; - - // A unique identifier for the linked span. The ID is an 8-byte array. - bytes span_id = 2; -} - -// Specifies the method of aggregating metric values, either DELTA (change since last report) -// or CUMULATIVE (total since a fixed start time). -enum AggregationTemporality { - /* UNSPECIFIED is the default AggregationTemporality, it MUST not be used. */ - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; - - /** DELTA is an AggregationTemporality for a profiler which reports - changes since last report time. Successive metrics contain aggregation of - values from continuous and non-overlapping intervals. - - The values for a DELTA metric are based only on the time interval - associated with one measurement cycle. There is no dependency on - previous measurements like is the case for CUMULATIVE metrics. - - For example, consider a system measuring the number of requests that - it receives and reports the sum of these requests every second as a - DELTA metric: - - 1. The system starts receiving at time=t_0. - 2. A request is received, the system measures 1 request. - 3. A request is received, the system measures 1 request. - 4. A request is received, the system measures 1 request. - 5. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_0 to - t_0+1 with a value of 3. - 6. A request is received, the system measures 1 request. - 7. A request is received, the system measures 1 request. - 8. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_0+1 to - t_0+2 with a value of 2. */ - AGGREGATION_TEMPORALITY_DELTA = 1; - - /** CUMULATIVE is an AggregationTemporality for a profiler which - reports changes since a fixed start time. This means that current values - of a CUMULATIVE metric depend on all previous measurements since the - start time. Because of this, the sender is required to retain this state - in some form. 
If this state is lost or invalidated, the CUMULATIVE metric - values MUST be reset and a new fixed start time following the last - reported measurement time sent MUST be used. - - For example, consider a system measuring the number of requests that - it receives and reports the sum of these requests every second as a - CUMULATIVE metric: - - 1. The system starts receiving at time=t_0. - 2. A request is received, the system measures 1 request. - 3. A request is received, the system measures 1 request. - 4. A request is received, the system measures 1 request. - 5. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_0 to - t_0+1 with a value of 3. - 6. A request is received, the system measures 1 request. - 7. A request is received, the system measures 1 request. - 8. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_0 to - t_0+2 with a value of 5. - 9. The system experiences a fault and loses state. - 10. The system recovers and resumes receiving at time=t_1. - 11. A request is received, the system measures 1 request. - 12. The 1 second collection cycle ends. A metric is exported for the - number of requests received over the interval of time t_1 to - t_1+1 with a value of 1. - - Note: Even though, when reporting changes since last report time, using - CUMULATIVE is valid, it is not recommended. */ - AGGREGATION_TEMPORALITY_CUMULATIVE = 2; -} - -// ValueType describes the type and units of a value, with an optional aggregation temporality. -message ValueType { - int64 type = 1; // Index into string table. - int64 unit = 2; // Index into string table. - - AggregationTemporality aggregation_temporality = 3; -} - -// Each Sample records values encountered in some program -// context. The program context is typically a stack trace, perhaps -// augmented with auxiliary information like the thread-id, some -// indicator of a higher level request being handled etc. -message Sample { - // The indices recorded here correspond to locations in Profile.location. - // The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length] - repeated uint64 location_index = 1; - // locations_start_index along with locations_length refers to to a slice of locations in Profile.location. - // Supersedes location_index. - uint64 locations_start_index = 7; - // locations_length along with locations_start_index refers to a slice of locations in Profile.location. - // Supersedes location_index. - uint64 locations_length = 8; - // A 128bit id that uniquely identifies this stacktrace, globally. Index into string table. [optional] - uint32 stacktrace_id_index = 9; - // The type and unit of each value is defined by the corresponding - // entry in Profile.sample_type. All samples must have the same - // number of values, the same as the length of Profile.sample_type. - // When aggregating multiple samples into a single sample, the - // result has a list of values that is the element-wise sum of the - // lists of the originals. - repeated int64 value = 2; - // label includes additional context for this sample. It can include - // things like a thread id, allocation size, etc. - // - // NOTE: While possible, having multiple values for the same label key is - // strongly discouraged and should never be used. Most tools (e.g. pprof) do - // not have good (or any) support for multi-value labels. 
And an even more - // discouraged case is having a string label and a numeric label of the same - // name on a sample. Again, possible to express, but should not be used. - // [deprecated, superseded by attributes] - repeated Label label = 3; - // References to attributes in Profile.attribute_table. [optional] - repeated uint64 attributes = 10; - - // Reference to link in Profile.link_table. [optional] - uint64 link = 12; - - // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected - // to fall within the Profile's time range. [optional] - repeated uint64 timestamps_unix_nano = 13; -} - -// Provides additional context for a sample, -// such as thread ID or allocation size, with optional units. [deprecated] -message Label { - int64 key = 1; // Index into string table - - // At most one of the following must be present - int64 str = 2; // Index into string table - int64 num = 3; - - // Should only be present when num is present. - // Specifies the units of num. - // Use arbitrary string (for example, "requests") as a custom count unit. - // If no unit is specified, consumer may apply heuristic to deduce the unit. - // Consumers may also interpret units like "bytes" and "kilobytes" as memory - // units and units like "seconds" and "nanoseconds" as time units, - // and apply appropriate unit conversions to these. - int64 num_unit = 4; // Index into string table -} - -// Indicates the semantics of the build_id field. -enum BuildIdKind { - // Linker-generated build ID, stored in the ELF binary notes. - BUILD_ID_LINKER = 0; - // Build ID based on the content hash of the binary. Currently no particular - // hashing approach is standardized, so a given producer needs to define it - // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. - // We may choose to provide a standardized stable hash recommendation later. - BUILD_ID_BINARY_HASH = 1; -} - -// Describes the mapping of a binary in memory, including its address range, -// file offset, and metadata like build ID -message Mapping { - // Unique nonzero id for the mapping. [deprecated] - uint64 id = 1; - // Address at which the binary (or DLL) is loaded into memory. - uint64 memory_start = 2; - // The limit of the address range occupied by this mapping. - uint64 memory_limit = 3; - // Offset in the binary that corresponds to the first mapped address. - uint64 file_offset = 4; - // The object this entry is loaded from. This can be a filename on - // disk for the main binary and shared libraries, or virtual - // abstractions like "[vdso]". - int64 filename = 5; // Index into string table - // A string that uniquely identifies a particular program version - // with high probability. E.g., for binaries generated by GNU tools, - // it could be the contents of the .note.gnu.build-id field. - int64 build_id = 6; // Index into string table - // Specifies the kind of build id. See BuildIdKind enum for more details [optional] - BuildIdKind build_id_kind = 11; - // References to attributes in Profile.attribute_table. [optional] - repeated uint64 attributes = 12; - // The following fields indicate the resolution of symbolic info. - bool has_functions = 7; - bool has_filenames = 8; - bool has_line_numbers = 9; - bool has_inline_frames = 10; -} - -// Describes function and line table debug information. -message Location { - // Unique nonzero id for the location. A profile could use - // instruction addresses or any integer sequence as ids. 
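Most scalar fields in this format are indices into Profile.string_table (whose entry 0 must be the empty string), and a Label sets at most one of str/num. A small C# sketch of decoding a label against the string table; LabelRecord is a hypothetical stand-in for the generated type and is not part of this diff.

    using System.Collections.Generic;

    // Hypothetical shape of a decoded Label: all long fields are string-table indices
    // except Num, which is the numeric value itself.
    internal sealed record LabelRecord(long Key, long Str, long Num, long NumUnit);

    internal static class LabelDecoder
    {
        public static (string Key, string Value) Decode(IReadOnlyList<string> stringTable, LabelRecord label)
        {
            string key = stringTable[(int)label.Key];

            if (label.Str != 0)
            {
                return (key, stringTable[(int)label.Str]); // string-valued label
            }

            string unit = stringTable[(int)label.NumUnit]; // index 0 resolves to ""
            return (key, unit.Length == 0 ? label.Num.ToString() : $"{label.Num} {unit}");
        }
    }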
[deprecated] - uint64 id = 1; - // The index of the corresponding profile.Mapping for this location. - // It can be unset if the mapping is unknown or not applicable for - // this profile type. - uint64 mapping_index = 2; - // The instruction address for this location, if available. It - // should be within [Mapping.memory_start...Mapping.memory_limit] - // for the corresponding mapping. A non-leaf address may be in the - // middle of a call instruction. It is up to display tools to find - // the beginning of the instruction if necessary. - uint64 address = 3; - // Multiple line indicates this location has inlined functions, - // where the last entry represents the caller into which the - // preceding entries were inlined. - // - // E.g., if memcpy() is inlined into printf: - // line[0].function_name == "memcpy" - // line[1].function_name == "printf" - repeated Line line = 4; - // Provides an indication that multiple symbols map to this location's - // address, for example due to identical code folding by the linker. In that - // case the line information above represents one of the multiple - // symbols. This field must be recomputed when the symbolization state of the - // profile changes. - bool is_folded = 5; - - // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. - uint32 type_index = 6; - - // References to attributes in Profile.attribute_table. [optional] - repeated uint64 attributes = 7; -} - -// Details a specific line in a source code, linked to a function. -message Line { - // The index of the corresponding profile.Function for this line. - uint64 function_index = 1; - // Line number in source code. - int64 line = 2; - // Column number in source code. - int64 column = 3; -} - -// Describes a function, including its human-readable name, system name, -// source file, and starting line number in the source. -message Function { - // Unique nonzero id for the function. [deprecated] - uint64 id = 1; - // Name of the function, in human-readable form if available. - int64 name = 2; // Index into string table - // Name of the function, as identified by the system. - // For instance, it can be a C++ mangled name. - int64 system_name = 3; // Index into string table - // Source file containing the function. - int64 filename = 4; // Index into string table - // Line number in source file. - int64 start_line = 5; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto deleted file mode 100644 index 84b0108f86a..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/profiles/v1experimental/profiles.proto +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package opentelemetry.proto.profiles.v1experimental; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; -import "opentelemetry/proto/profiles/v1experimental/pprofextended.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.profiles.v1experimental"; -option java_outer_classname = "ProfilesProto"; -option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; - -// Relationships Diagram -// -// ┌──────────────────┐ LEGEND -// │ ProfilesData │ -// └──────────────────┘ ─────▶ embedded -// │ -// │ 1-n ─────▷ referenced by index -// ▼ -// ┌──────────────────┐ -// │ ResourceProfiles │ -// └──────────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ ScopeProfiles │ -// └──────────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ ProfileContainer │ -// └──────────────────┘ -// │ -// │ 1-1 -// ▼ -// ┌──────────────────┐ -// │ Profile │ -// └──────────────────┘ -// │ n-1 -// │ 1-n ┌───────────────────────────────────────┐ -// ▼ │ ▽ -// ┌──────────────────┐ 1-n ┌──────────────┐ ┌──────────┐ -// │ Sample │ ──────▷ │ KeyValue │ │ Link │ -// └──────────────────┘ └──────────────┘ └──────────┘ -// │ 1-n △ △ -// │ 1-n ┌─────────────────┘ │ 1-n -// ▽ │ │ -// ┌──────────────────┐ n-1 ┌──────────────┐ -// │ Location │ ──────▷ │ Mapping │ -// └──────────────────┘ └──────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ Line │ -// └──────────────────┘ -// │ -// │ 1-1 -// ▽ -// ┌──────────────────┐ -// │ Function │ -// └──────────────────┘ -// - -// ProfilesData represents the profiles data that can be stored in persistent storage, -// OR can be embedded by other protocols that transfer OTLP profiles data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message ProfilesData { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceProfiles resource_profiles = 1; -} - - -// A collection of ScopeProfiles from a Resource. -message ResourceProfiles { - reserved 1000; - - // The resource for the profiles in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeProfiles that originate from a resource. - repeated ScopeProfiles scope_profiles = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_profiles" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of ProfileContainers produced by an InstrumentationScope. -message ScopeProfiles { - // The instrumentation scope information for the profiles in this message. 
- // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of ProfileContainers that originate from an instrumentation scope. - repeated ProfileContainer profiles = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all profiles in the "profiles" field. - string schema_url = 3; -} - -// A ProfileContainer represents a single profile. It wraps pprof profile with OpenTelemetry specific metadata. -message ProfileContainer { - // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with - // all zeroes is considered invalid. - // - // This field is required. - bytes profile_id = 1; - - // start_time_unix_nano is the start time of the profile. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 start_time_unix_nano = 2; - - // end_time_unix_nano is the end time of the profile. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 end_time_unix_nano = 3; - - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; - - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - uint32 dropped_attributes_count = 5; - - // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] - string original_payload_format = 6; - - // Original payload can be stored in this field. This can be useful for users who want to get the original payload. - // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. - // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. - // If the original payload is in pprof format, it SHOULD not be included in this field. - // The field is optional, however if it is present `profile` MUST be present and contain the same profiling information. - bytes original_payload = 7; - - // This is a reference to a pprof profile. Required, even when original_payload is present. 
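The ProfileContainer comments above impose a few invariants a backend can check cheaply: a required 16-byte, non-zero profile_id; end_time_unix_nano >= start_time_unix_nano; and an original_payload_format whenever original_payload is present. A hedged C# sketch, with parameters standing in for the corresponding fields:

    using System.Linq;

    // Validates the documented ProfileContainer invariants.
    internal static class ProfileContainerChecks
    {
        public static bool IsValid(
            byte[] profileId,
            ulong startTimeUnixNano,
            ulong endTimeUnixNano,
            string originalPayloadFormat,
            byte[] originalPayload)
        {
            bool idOk = profileId is { Length: 16 } && profileId.Any(b => b != 0);
            bool timesOk = endTimeUnixNano >= startTimeUnixNano;
            bool payloadOk = originalPayload is null
                || originalPayload.Length == 0
                || !string.IsNullOrEmpty(originalPayloadFormat);

            return idOk && timesOk && payloadOk;
        }
    }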
- opentelemetry.proto.profiles.v1experimental.Profile profile = 8; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/resource/v1/resource.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/resource/v1/resource.proto deleted file mode 100644 index 6637560bc35..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/resource/v1/resource.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.resource.v1; - -import "opentelemetry/proto/common/v1/common.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Resource.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.resource.v1"; -option java_outer_classname = "ResourceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/resource/v1"; - -// Resource information. -message Resource { - // Set of attributes that describe the resource. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then - // no attributes were dropped. - uint32 dropped_attributes_count = 2; -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/trace/v1/trace.proto b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/trace/v1/trace.proto deleted file mode 100644 index 5cb2f3ce1cd..00000000000 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/Implementation/opentelemetry/proto/trace/v1/trace.proto +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package opentelemetry.proto.trace.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Trace.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.trace.v1"; -option java_outer_classname = "TraceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/trace/v1"; - -// TracesData represents the traces data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP traces data but do -// not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message TracesData { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceSpans resource_spans = 1; -} - -// A collection of ScopeSpans from a Resource. -message ResourceSpans { - reserved 1000; - - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeSpans that originate from a resource. - repeated ScopeSpans scope_spans = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_spans" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Spans produced by an InstrumentationScope. -message ScopeSpans { - // The instrumentation scope information for the spans in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of Spans that originate from an instrumentation scope. - repeated Span spans = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. - string schema_url = 3; -} - -// A Span represents a single operation performed by a single component of the system. -// -// The next available field id is 17. -message Span { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - bytes trace_id = 1; - - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - bytes span_id = 2; - - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - string trace_state = 3; - - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - bytes parent_span_id = 4; - - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether a span's parent - // is remote. The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // When creating span messages, if the message is logically forwarded from another source - // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - // be set to zero. - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // - // [Optional]. - fixed32 flags = 16; - - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // Empty value is equivalent to an unknown span name. - // - // This field is required. - string name = 5; - - // SpanKind is the type of span. Can be used to specify additional relationships between spans - // in addition to a parent/child relationship. - enum SpanKind { - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. - SPAN_KIND_UNSPECIFIED = 0; - - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. - SPAN_KIND_INTERNAL = 1; - - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - SPAN_KIND_SERVER = 2; - - // Indicates that the span describes a request to some remote service. - SPAN_KIND_CLIENT = 3; - - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. 
- SPAN_KIND_PRODUCER = 4; - - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. - SPAN_KIND_CONSUMER = 5; - } - - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - SpanKind kind = 6; - - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 start_time_unix_nano = 7; - - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 end_time_unix_nano = 8; - - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "example.com/myattribute": true - // "example.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; - - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - uint32 dropped_attributes_count = 10; - - // Event is a time-stamped annotation of the span, consisting of user-supplied - // text description and key-value pairs. - message Event { - // time_unix_nano is the time the event occurred. - fixed64 time_unix_nano = 1; - - // name of the event. - // This field is semantically required to be set to non-empty string. - string name = 2; - - // attributes is a collection of attribute key/value pairs on the event. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 3; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - uint32 dropped_attributes_count = 4; - } - - // events is a collection of Event items. - repeated Event events = 11; - - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. 
- uint32 dropped_events_count = 12; - - // A pointer from the current span to another span in the same trace or in a - // different trace. For example, this can be used in batching operations, - // where a single batch handler processes multiple requests from different - // traces or when the handler receives a request from a different project. - message Link { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - bytes trace_id = 1; - - // A unique identifier for the linked span. The ID is an 8-byte array. - bytes span_id = 2; - - // The trace_state associated with the link. - string trace_state = 3; - - // attributes is a collection of attribute key/value pairs on the link. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - uint32 dropped_attributes_count = 5; - - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether the link is remote. - // The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - // - // [Optional]. - fixed32 flags = 6; - } - - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. - repeated Link links = 13; - - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - uint32 dropped_links_count = 14; - - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). - Status status = 15; -} - -// The Status type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -message Status { - reserved 1; - - // A developer-facing human readable error message. - string message = 2; - - // For the semantics of status codes see - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status - enum StatusCode { - // The default status. - STATUS_CODE_UNSET = 0; - // The Span has been validated by an Application developer or Operator to - // have completed successfully. - STATUS_CODE_OK = 1; - // The Span contains an error. - STATUS_CODE_ERROR = 2; - }; - - // The status code. - StatusCode code = 3; -} - -// SpanFlags represents constants used to interpret the -// Span.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. 
To extract the bit-field, for example, use an -// expression like: -// -// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) -// -// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. -// -// Note that Span flags were introduced in version 1.1 of the -// OpenTelemetry protocol. Older Span producers do not set this -// field, consequently consumers should not rely on the absence of a -// particular flag bit to indicate the presence of a particular feature. -enum SpanFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - SPAN_FLAGS_DO_NOT_USE = 0; - - // Bits 0-7 are used for trace flags. - SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; - - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. - SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100; - SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200; - - // Bits 10-31 are reserved for future use. -} diff --git a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.csproj b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.csproj index 6f1833c910b..59da00ba6aa 100644 --- a/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.csproj +++ b/test/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests/OpenTelemetry.Exporter.OpenTelemetryProtocol.Tests.csproj @@ -2,7 +2,6 @@ $(TargetFrameworksForTests) - $(NoWarn);SA1518;SA1210 @@ -55,8 +54,8 @@ - - Implementation + + $(RepoRoot)\src\Shared\Proto From d07f7060faeae95aad8b3c95008f9c6dae204ce0 Mon Sep 17 00:00:00 2001 From: Mikel Blanchard Date: Mon, 2 Dec 2024 14:07:32 -0800 Subject: [PATCH 08/10] Lint. --- src/Shared/Proto/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Shared/Proto/README.md b/src/Shared/Proto/README.md index c7b2db9302a..fdd15e95f94 100644 --- a/src/Shared/Proto/README.md +++ b/src/Shared/Proto/README.md @@ -2,4 +2,4 @@ `.proto` files are copied from the [`opentelemetry-proto`](https://github.com/open-telemetry/opentelemetry-proto/commit/1a931b4b57c34e7fd8f7dddcaa9b7587840e9c08) -repo. +repo. From 41dc3625c1ef1aac524c127e15db359b60ab3577 Mon Sep 17 00:00:00 2001 From: Mikel Blanchard Date: Mon, 2 Dec 2024 14:09:44 -0800 Subject: [PATCH 09/10] Clean up. 
--- test/Benchmarks/Benchmarks.csproj | 1 - 1 file changed, 1 deletion(-) diff --git a/test/Benchmarks/Benchmarks.csproj b/test/Benchmarks/Benchmarks.csproj index cadc5e4ffa5..34d74cb0bd7 100644 --- a/test/Benchmarks/Benchmarks.csproj +++ b/test/Benchmarks/Benchmarks.csproj @@ -3,7 +3,6 @@ Exe $(TargetFrameworksForTests) - From c54a5556b182320c2b56289e59206a81c490f6ee Mon Sep 17 00:00:00 2001 From: Rajkumar Rangaraj Date: Mon, 2 Dec 2024 15:49:44 -0800 Subject: [PATCH 10/10] nits --- .../OtlpExporterPersistentStorageTransmissionHandler.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterPersistentStorageTransmissionHandler.cs b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterPersistentStorageTransmissionHandler.cs index df75750d9d6..c40d7577908 100644 --- a/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterPersistentStorageTransmissionHandler.cs +++ b/src/OpenTelemetry.Exporter.OpenTelemetryProtocol/Implementation/Transmission/OtlpExporterPersistentStorageTransmissionHandler.cs @@ -31,7 +31,7 @@ internal OtlpExporterPersistentStorageTransmissionHandler(PersistentBlobProvider this.thread = new Thread(this.RetryStoredRequests) { - Name = $"OtlpExporter Persistent Retry Storage", + Name = "OtlpExporter Persistent Retry Storage", IsBackground = true, };
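
Editorial note (not part of the patch itself): the deleted trace.proto devotes much of its commentary to the Span.flags bit field and the SpanFlags masks used to interpret it. As a quick reference, the sketch below shows how a consumer might decode those bits in C#. The mask values are copied from the SpanFlags enum in the deleted file; the SpanFlagsReader type and its Read method are illustrative names only and are not part of this change or of the exporter's public API.

internal static class SpanFlagsReader
{
    // Values mirror the SpanFlags enum from the deleted trace.proto.
    private const uint TraceFlagsMask = 0x000000FF;   // bits 0-7: W3C trace flags
    private const uint HasIsRemoteMask = 0x00000100;  // bit 8: remote-ness is known
    private const uint IsRemoteMask = 0x00000200;     // bit 9: parent/link is remote

    // Returns the 8-bit W3C trace flags and a tri-state "is remote" value:
    // null when bit 8 is not set, i.e. the producer did not record it.
    public static (byte TraceFlags, bool? IsRemote) Read(uint flags)
    {
        byte traceFlags = (byte)(flags & TraceFlagsMask);

        bool? isRemote = (flags & HasIsRemoteMask) != 0
            ? (flags & IsRemoteMask) != 0
            : (bool?)null;

        return (traceFlags, isRemote);
    }
}

Bits 10-31 are reserved, so the reader deliberately ignores them, matching the proto's guidance that consumers must not assume those bits are zero.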