diff --git a/Cargo.lock b/Cargo.lock index 554ef652b..e87727e47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1993,6 +1993,12 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + [[package]] name = "futures-util" version = "0.3.30" @@ -2685,6 +2691,12 @@ dependencies = [ "libc", ] +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + [[package]] name = "matchers" version = "0.1.0" @@ -3008,6 +3020,52 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "opentelemetry" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab70038c28ed37b97d8ed414b6429d343a8bbf44c9f79ec854f3a643029ba6d7" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "pin-project-lite", + "thiserror", + "tracing", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6e05acbfada5ec79023c85368af14abd0b307c015e9064d249b2a950ef459a6" +dependencies = [ + "hex", + "opentelemetry", + "opentelemetry_sdk", + "prost", + "serde", + "tonic", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "231e9d6ceef9b0b2546ddf52335785ce41252bc7474ee8ba05bfad277be13ab8" +dependencies = [ + "async-trait", + "futures-channel", + "futures-executor", + "futures-util", + "glob", + "opentelemetry", + "percent-encoding", + "rand", + "serde_json", + "thiserror", +] + [[package]] name = "ordered-float" version = "2.10.1" @@ -3108,6 +3166,7 @@ dependencies = [ "actix-web-static-files", "anyhow", "argon2", + "arrow", "arrow-array", "arrow-flight", "arrow-ipc", @@ -3141,12 +3200,14 @@ dependencies = [ "humantime-serde", "itertools 0.13.0", "lazy_static", + "maplit", "mime", "nom", "num_cpus", "object_store", "once_cell", "openid", + "opentelemetry-proto", "parquet", "path-clean", "prometheus", @@ -3158,6 +3219,7 @@ dependencies = [ "regex", "relative-path", "reqwest 0.11.27", + "rstest", "rustls 0.22.4", "rustls-pemfile 2.1.2", "semver", @@ -3823,6 +3885,36 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rstest" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a2c585be59b6b5dd66a9d2084aa1d8bd52fbdb806eafdeffb52791147862035" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version", +] + +[[package]] +name = "rstest_macros" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "825ea780781b15345a146be27eaefb05085e337e869bff01b4306a4fd4a9ad5a" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.79", + "unicode-ident", +] + [[package]] name = "rustc-demangle" version = "0.1.23" diff --git a/Cargo.toml b/Cargo.toml index b33238052..26d35460d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,6 +106,7 @@ 
 prost = "0.13.3"
 prometheus-parse = "0.2.5"
 sha2 = "0.10.8"
 tracing = "0.1.41"
+opentelemetry-proto = "0.27.0"
 [build-dependencies]
 cargo_toml = "0.20.1"
diff --git a/src/handlers/http/modal/utils/ingest_utils.rs b/src/handlers/http/modal/utils/ingest_utils.rs
index 62344d551..12afc33fc 100644
--- a/src/handlers/http/modal/utils/ingest_utils.rs
+++ b/src/handlers/http/modal/utils/ingest_utils.rs
@@ -16,16 +16,15 @@
  *
  */
 
-use std::{
-    collections::{BTreeMap, HashMap},
-    sync::Arc,
-};
-
 use actix_web::HttpRequest;
 use arrow_schema::Field;
 use bytes::Bytes;
 use chrono::{DateTime, NaiveDateTime, Utc};
 use serde_json::Value;
+use std::{
+    collections::{BTreeMap, HashMap},
+    sync::Arc,
+};
 
 use crate::{
     event::{
diff --git a/src/handlers/http/otel.rs b/src/handlers/http/otel.rs
index de4b2658c..d199d11ad 100644
--- a/src/handlers/http/otel.rs
+++ b/src/handlers/http/otel.rs
@@ -15,83 +15,92 @@
  * along with this program. If not, see <https://www.gnu.org/licenses/>.
  *
  */
+
 pub mod logs;
 pub mod metrics;
-#[allow(clippy::all)]
-pub mod proto;
 pub mod traces;
 
-use proto::common::v1::KeyValue;
+use opentelemetry_proto::tonic::common::v1::{any_value::Value as OtelValue, AnyValue, KeyValue};
 use serde_json::Value;
 use std::collections::BTreeMap;
 
 // Value can be one of types - String, Bool, Int, Double, ArrayValue, AnyValue, KeyValueList, Byte
-pub fn collect_json_from_any_value(
-    key: &String,
-    value: super::otel::proto::common::v1::Value,
-) -> BTreeMap<String, Value> {
+pub fn collect_json_from_value(key: &String, value: OtelValue) -> BTreeMap<String, Value> {
     let mut value_json: BTreeMap<String, Value> = BTreeMap::new();
-    insert_if_some(&mut value_json, key, &value.str_val);
-    insert_bool_if_some(&mut value_json, key, &value.bool_val);
-    insert_if_some(&mut value_json, key, &value.int_val);
-    insert_number_if_some(&mut value_json, key, &value.double_val);
-
-    //ArrayValue is a vector of AnyValue
-    //traverse by recursively calling the same function
-    if value.array_val.is_some() {
-        let array_val = value.array_val.as_ref().unwrap();
-        let values = &array_val.values;
-        for value in values {
-            let array_value_json = collect_json_from_any_value(key, value.clone());
-            for key in array_value_json.keys() {
-                value_json.insert(
-                    format!(
-                        "{}_{}",
-                        key.to_owned(),
-                        value_to_string(array_value_json[key].to_owned())
-                    ),
-                    array_value_json[key].to_owned(),
-                );
+    match value {
+        OtelValue::StringValue(str_val) => {
+            value_json.insert(key.to_string(), Value::String(str_val));
+        }
+        OtelValue::BoolValue(bool_val) => {
+            value_json.insert(key.to_string(), Value::Bool(bool_val));
+        }
+        OtelValue::IntValue(int_val) => {
+            value_json.insert(key.to_string(), Value::String(int_val.to_string()));
+        }
+        OtelValue::DoubleValue(double_val) => {
+            if let Some(number) = serde_json::Number::from_f64(double_val) {
+                value_json.insert(key.to_string(), Value::Number(number));
             }
         }
-    }
-
-    //KeyValueList is a vector of KeyValue
-    //traverse through each element in the vector
-    if value.kv_list_val.is_some() {
-        let kv_list_val = value.kv_list_val.unwrap();
-        for key_value in kv_list_val.values {
-            let value = key_value.value;
-            if value.is_some() {
-                let value = value.unwrap();
-                let key_value_json = collect_json_from_any_value(key, value);
-
-                for key in key_value_json.keys() {
+        OtelValue::ArrayValue(array_val) => {
+            let values = &array_val.values;
+            for value in values {
+                let array_value_json = collect_json_from_anyvalue(key, value.clone());
+                for key in array_value_json.keys() {
                     value_json.insert(
                         format!(
-                            "{}_{}_{}",
+                            "{}_{}",
                             key.to_owned(),
-                            key_value.key,
-                            value_to_string(key_value_json[key].to_owned())
+                            value_to_string(array_value_json[key].to_owned())
                         ),
-                        key_value_json[key].to_owned(),
+                        array_value_json[key].to_owned(),
                     );
                 }
             }
         }
+        OtelValue::KvlistValue(kv_list_val) => {
+            for key_value in kv_list_val.values {
+                let value = key_value.value;
+                if value.is_some() {
+                    let value = value.unwrap();
+                    let key_value_json = collect_json_from_anyvalue(key, value.clone());
+
+                    for key in key_value_json.keys() {
+                        value_json.insert(
+                            format!(
+                                "{}_{}_{}",
+                                key.to_owned(),
+                                key_value.key,
+                                value_to_string(key_value_json[key].to_owned())
+                            ),
+                            key_value_json[key].to_owned(),
+                        );
+                    }
+                }
+            }
+        }
+        OtelValue::BytesValue(bytes_val) => {
+            value_json.insert(
+                key.to_string(),
+                Value::String(String::from_utf8_lossy(&bytes_val).to_string()),
+            );
+        }
     }
-    insert_if_some(&mut value_json, key, &value.bytes_val);
 
     value_json
 }
 
+pub fn collect_json_from_anyvalue(key: &String, value: AnyValue) -> BTreeMap<String, Value> {
+    collect_json_from_value(key, value.value.unwrap())
+}
+
 //traverse through Value by calling function collect_json_from_anyvalue
 pub fn collect_json_from_values(
-    values: &Option<Value>,
+    values: &Option<AnyValue>,
     key: &String,
 ) -> BTreeMap<String, Value> {
     let mut value_json: BTreeMap<String, Value> = BTreeMap::new();
     for value in values.iter() {
-        value_json = collect_json_from_any_value(key, value.clone());
+        value_json = collect_json_from_anyvalue(key, value.clone());
     }
 
     value_json
@@ -142,11 +151,9 @@ pub fn insert_bool_if_some(map: &mut BTreeMap<String, Value>, key: &str, option:
     }
 }
 
-pub fn insert_attributes(map: &mut BTreeMap<String, Value>, attributes: &Option<Vec<KeyValue>>) {
-    if let Some(attrs) = attributes {
-        let attributes_json = flatten_attributes(attrs);
-        for (key, value) in attributes_json {
-            map.insert(key, value);
-        }
+pub fn insert_attributes(map: &mut BTreeMap<String, Value>, attributes: &Vec<KeyValue>) {
+    let attributes_json = flatten_attributes(attributes);
+    for (key, value) in attributes_json {
+        map.insert(key, value);
+    }
 }
diff --git a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.common.v1.rs b/src/handlers/http/otel/compiled_protos/opentelemetry.proto.common.v1.rs
deleted file mode 100644
index bc40d0720..000000000
--- a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.common.v1.rs
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Parseable Server (C) 2022 - 2024 Parseable, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- *
- */
-
- // This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation.
- // All the repeated fields were changed to Option> and the `oneof` fields were changed to Option.
-
- use serde::{Deserialize, Serialize};
- #[derive(Serialize, Deserialize, Debug, Clone)]
- /// AnyValue is used to represent any type of attribute value. AnyValue may contain a
- /// primitive value such as a string or integer or it may contain an arbitrary nested
- /// object containing arrays, key-value lists and primitives.
- pub struct AnyValue {
-     /// The value is one of the listed fields.
It is valid for all values to be unspecified - /// in which case this AnyValue is considered to be "empty". - pub value: Value, - } - - #[derive(Serialize, Deserialize, Debug, Clone)] - pub struct Value { - #[serde(rename = "stringValue")] - pub str_val: Option, - #[serde(rename = "boolValue")] - pub bool_val: Option, - #[serde(rename = "intValue")] - pub int_val: Option, - #[serde(rename = "doubleValue")] - pub double_val: Option, - #[serde(rename = "arrayValue")] - pub array_val: Option, - #[serde(rename = "keyVauleList")] - pub kv_list_val: Option, - #[serde(rename = "bytesValue")] - pub bytes_val: Option, - } - - #[derive(Serialize, Deserialize, Debug, Clone)] - /// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message - /// since oneof in AnyValue does not allow repeated fields. - pub struct ArrayValue { - /// Array of values. The array may be empty (contain 0 elements). - pub values: Vec, - } - - #[derive(Serialize, Deserialize, Debug, Clone)] - /// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message - /// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need - /// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to - /// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches - /// are semantically equivalent. - pub struct KeyValueList { - /// A collection of key/value pairs of key-value pairs. The list may be empty (may - /// contain 0 elements). - /// The keys MUST be unique (it is not allowed to have more than one - /// value with the same key). - pub values: Vec, - } - - #[derive(Serialize, Deserialize, Debug, Clone)] - /// KeyValue is a key-value pair that is used to store Span attributes, Link - /// attributes, etc. - pub struct KeyValue { - pub key: String, - pub value: Option, - } - - #[derive(Serialize, Deserialize, Debug)] - /// InstrumentationScope is a message representing the instrumentation scope information - /// such as the fully qualified name and version. - pub struct InstrumentationScope { - /// An empty instrumentation scope name means the name is unknown. - pub name: Option, - pub version: Option, - /// Additional attributes that describe the scope. \[Optional\]. - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - #[serde(rename = "droppedAttributesCount")] - pub dropped_attributes_count: Option, - } - \ No newline at end of file diff --git a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.logs.v1.rs b/src/handlers/http/otel/compiled_protos/opentelemetry.proto.logs.v1.rs deleted file mode 100644 index dc63286e3..000000000 --- a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.logs.v1.rs +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Parseable Server (C) 2022 - 2024 Parseable, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
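Two notes on the `otel.rs` rewrite above. First, a minimal sketch (hypothetical "resource" key, assuming the helpers from this diff are in scope) of how a nested OTLP attribute flattens:

```rust
use opentelemetry_proto::tonic::common::v1::{
    any_value::Value as OtelValue, AnyValue, KeyValue, KeyValueList,
};

// A kvlist attribute holding { "region": "us-east-1" }.
let nested = OtelValue::KvlistValue(KeyValueList {
    values: vec![KeyValue {
        key: "region".to_string(),
        value: Some(AnyValue {
            value: Some(OtelValue::StringValue("us-east-1".to_string())),
        }),
    }],
});

// Flattened keys take the "{outer}_{inner}_{stringified value}" form built
// in the KvlistValue arm, so one entry comes back for this input.
let flat = collect_json_from_value(&"resource".to_string(), nested);
assert_eq!(flat.len(), 1);
```

Second, `collect_json_from_anyvalue` unwraps `value.value`, so an `AnyValue` with no payload (legal per the spec text just above, where an unset value means the `AnyValue` is "empty") would panic rather than flatten to nothing; worth a follow-up.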
- * - */ - -// This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation. - // All the repeated fields were changed to Option>. - - use crate::handlers::http::otel::proto::common::v1::InstrumentationScope; - use crate::handlers::http::otel::proto::common::v1::KeyValue; - use crate::handlers::http::otel::proto::common::v1::Value; - use crate::handlers::http::otel::proto::resource::v1::Resource; - use serde::{Deserialize, Serialize}; - - #[derive(Serialize, Deserialize, Debug)] - /// LogsData represents the logs data that can be stored in a persistent storage, - /// OR can be embedded by other protocols that transfer OTLP logs data but do not - /// implement the OTLP protocol. - /// - /// The main difference between this message and collector protocol is that - /// in this message there will not be any "control" or "metadata" specific to - /// OTLP protocol. - /// - /// When new fields are added into this message, the OTLP request MUST be updated - /// as well. - pub struct LogsData { - /// An array of ResourceLogs. - /// For data coming from a single resource this array will typically contain - /// one element. Intermediary nodes that receive data from multiple origins - /// typically batch the data before forwarding further and in that case this - /// array will contain multiple elements. - #[serde(rename = "resourceLogs")] - pub resource_logs: Option>, - } - - #[derive(Serialize, Deserialize, Debug)] - /// A collection of ScopeLogs from a Resource. - pub struct ResourceLogs { - /// The resource for the logs in this message. - /// If this field is not set then resource info is unknown. - pub resource: Option, - /// A list of ScopeLogs that originate from a resource. - #[serde(rename = "scopeLogs")] - pub scope_logs: Option>, - /// This schema_url applies to the data in the "resource" field. It does not apply - /// to the data in the "scope_logs" field which have their own schema_url field. - #[serde(rename = "schemaUrl")] - pub schema_url: Option, - } - - #[derive(Serialize, Deserialize, Debug)] - /// A collection of Logs produced by a Scope. - pub struct ScopeLogs { - /// The instrumentation scope information for the logs in this message. - /// Semantically when InstrumentationScope isn't set, it is equivalent with - /// an empty instrumentation scope name (unknown). - pub scope: Option, - /// A list of log records. - #[serde(rename = "logRecords")] - pub log_records: Vec, - /// This schema_url applies to all logs in the "logs" field. - #[serde(rename = "schemaUrl")] - pub schema_url: Option, - } - - #[derive(Serialize, Deserialize, Debug)] - /// A log record according to OpenTelemetry Log Data Model: - /// - pub struct LogRecord { - /// time_unix_nano is the time when the event occurred. - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - /// Value of 0 indicates unknown or missing timestamp. - #[serde(rename = "timeUnixNano")] - pub time_unix_nano: Option, - /// Time when the event was observed by the collection system. - /// For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) - /// this timestamp is typically set at the generation time and is equal to Timestamp. - /// For events originating externally and collected by OpenTelemetry (e.g. using - /// Collector) this is the time when OpenTelemetry's code observed the event measured - /// by the clock of the OpenTelemetry code. This field MUST be set once the event is - /// observed by OpenTelemetry. 
- /// - /// For converting OpenTelemetry log data to formats that support only one timestamp or - /// when receiving OpenTelemetry log data by recipients that support only one timestamp - /// internally the following logic is recommended: - /// - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - /// Value of 0 indicates unknown or missing timestamp. - #[serde(rename = "observedTimeUnixNano")] - pub observed_time_unix_nano: Option, - /// Numerical value of the severity, normalized to values described in Log Data Model. - /// \[Optional\]. - #[serde(rename = "severityNumber")] - pub severity_number: Option, - /// The severity text (also known as log level). The original string representation as - /// it is known at the source. \[Optional\]. - #[serde(rename = "severityText")] - pub severity_text: Option, - /// A value containing the body of the log record. Can be for example a human-readable - /// string message (including multi-line) describing the event in a free form or it can - /// be a structured data composed of arrays and maps of other values. \[Optional\]. - pub body: Option, - /// Additional attributes that describe the specific event occurrence. \[Optional\]. - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - #[serde(rename = "droppedAttributesCount")] - pub dropped_attributes_count: Option, - /// Flags, a bit field. 8 least significant bits are the trace flags as - /// defined in W3C Trace Context specification. 24 most significant bits are reserved - /// and must be set to 0. Readers must not assume that 24 most significant bits - /// will be zero and must correctly mask the bits when reading 8-bit trace flag (use - /// flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). \[Optional\]. - pub flags: Option, - /// A unique identifier for a trace. All logs from the same trace share - /// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - /// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - /// is zero-length and thus is also invalid). - /// - /// This field is optional. - /// - /// The receivers SHOULD assume that the log record is not associated with a - /// trace if any of the following is true: - /// - the field is not present, - /// - the field contains an invalid value. - #[serde(rename = "traceId")] - pub trace_id: Option, - /// A unique identifier for a span within a trace, assigned when the span - /// is created. The ID is an 8-byte array. An ID with all zeroes OR of length - /// other than 8 bytes is considered invalid (empty string in OTLP/JSON - /// is zero-length and thus is also invalid). - /// - /// This field is optional. If the sender specifies a valid span_id then it SHOULD also - /// specify a valid trace_id. - /// - /// The receivers SHOULD assume that the log record is not associated with a - /// span if any of the following is true: - /// - the field is not present, - /// - the field contains an invalid value. - #[serde(rename = "spanId")] - pub span_id: Option, - } - /// Possible values for LogRecord.SeverityNumber. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] - #[repr(i32)] - pub enum SeverityNumber { - /// UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. 
- Unspecified = 0, - Trace = 1, - Trace2 = 2, - Trace3 = 3, - Trace4 = 4, - Debug = 5, - Debug2 = 6, - Debug3 = 7, - Debug4 = 8, - Info = 9, - Info2 = 10, - Info3 = 11, - Info4 = 12, - Warn = 13, - Warn2 = 14, - Warn3 = 15, - Warn4 = 16, - Error = 17, - Error2 = 18, - Error3 = 19, - Error4 = 20, - Fatal = 21, - Fatal2 = 22, - Fatal3 = 23, - Fatal4 = 24, - } - impl SeverityNumber { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(severity_number: i32) -> &'static str { - match severity_number { - 0 => "SEVERITY_NUMBER_UNSPECIFIED", - 1 => "SEVERITY_NUMBER_TRACE", - 2 => "SEVERITY_NUMBER_TRACE2", - 3 => "SEVERITY_NUMBER_TRACE3", - 4 => "SEVERITY_NUMBER_TRACE4", - 5 => "SEVERITY_NUMBER_DEBUG", - 6 => "SEVERITY_NUMBER_DEBUG2", - 7 => "SEVERITY_NUMBER_DEBUG3", - 8 => "SEVERITY_NUMBER_DEBUG4", - 9 => "SEVERITY_NUMBER_INFO", - 10 => "SEVERITY_NUMBER_INFO2", - 11 => "SEVERITY_NUMBER_INFO3", - 12 => "SEVERITY_NUMBER_INFO4", - 13 => "SEVERITY_NUMBER_WARN", - 14 => "SEVERITY_NUMBER_WARN2", - 15 => "SEVERITY_NUMBER_WARN3", - 16 => "SEVERITY_NUMBER_WARN4", - 17 => "SEVERITY_NUMBER_ERROR", - 18 => "SEVERITY_NUMBER_ERROR2", - 19 => "SEVERITY_NUMBER_ERROR3", - 20 => "SEVERITY_NUMBER_ERROR4", - 21 => "SEVERITY_NUMBER_FATAL", - 22 => "SEVERITY_NUMBER_FATAL2", - 23 => "SEVERITY_NUMBER_FATAL3", - 24 => "SEVERITY_NUMBER_FATAL4", - _ => "Invalid severity number", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "SEVERITY_NUMBER_UNSPECIFIED" => Some(Self::Unspecified), - "SEVERITY_NUMBER_TRACE" => Some(Self::Trace), - "SEVERITY_NUMBER_TRACE2" => Some(Self::Trace2), - "SEVERITY_NUMBER_TRACE3" => Some(Self::Trace3), - "SEVERITY_NUMBER_TRACE4" => Some(Self::Trace4), - "SEVERITY_NUMBER_DEBUG" => Some(Self::Debug), - "SEVERITY_NUMBER_DEBUG2" => Some(Self::Debug2), - "SEVERITY_NUMBER_DEBUG3" => Some(Self::Debug3), - "SEVERITY_NUMBER_DEBUG4" => Some(Self::Debug4), - "SEVERITY_NUMBER_INFO" => Some(Self::Info), - "SEVERITY_NUMBER_INFO2" => Some(Self::Info2), - "SEVERITY_NUMBER_INFO3" => Some(Self::Info3), - "SEVERITY_NUMBER_INFO4" => Some(Self::Info4), - "SEVERITY_NUMBER_WARN" => Some(Self::Warn), - "SEVERITY_NUMBER_WARN2" => Some(Self::Warn2), - "SEVERITY_NUMBER_WARN3" => Some(Self::Warn3), - "SEVERITY_NUMBER_WARN4" => Some(Self::Warn4), - "SEVERITY_NUMBER_ERROR" => Some(Self::Error), - "SEVERITY_NUMBER_ERROR2" => Some(Self::Error2), - "SEVERITY_NUMBER_ERROR3" => Some(Self::Error3), - "SEVERITY_NUMBER_ERROR4" => Some(Self::Error4), - "SEVERITY_NUMBER_FATAL" => Some(Self::Fatal), - "SEVERITY_NUMBER_FATAL2" => Some(Self::Fatal2), - "SEVERITY_NUMBER_FATAL3" => Some(Self::Fatal3), - "SEVERITY_NUMBER_FATAL4" => Some(Self::Fatal4), - _ => None, - } - } - } - /// LogRecordFlags is defined as a protobuf 'uint32' type and is to be used as - /// bit-fields. Each non-zero value defined in this enum is a bit-mask. - /// To extract the bit-field, for example, use an expression like: - /// - /// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) - /// - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] - #[repr(i32)] - pub enum LogRecordFlags { - /// The zero value for the enum. Should not be used for comparisons. 
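The severity lookup table deleted above has a generated counterpart; a sketch assuming prost's usual i32-enum API (a derived `TryFrom<i32>` plus `as_str_name`):

```rust
use opentelemetry_proto::tonic::logs::v1::SeverityNumber;

// OTLP carries severity as a plain i32; unknown values fall back to Unspecified.
let sev = SeverityNumber::try_from(17).unwrap_or(SeverityNumber::Unspecified);
assert_eq!(sev.as_str_name(), "SEVERITY_NUMBER_ERROR");
```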
- /// Instead use bitwise "and" with the appropriate mask as shown above. - DoNotUse = 0, - /// Bits 0-7 are used for trace flags. - TraceFlagsMask = 255, - } - impl LogRecordFlags { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(flag: u32) -> &'static str { - match flag { - 0 => "LOG_RECORD_FLAGS_DO_NOT_USE", - 255 => "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK", - _ => "Invalid flag", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "LOG_RECORD_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse), - "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK" => Some(Self::TraceFlagsMask), - _ => None, - } - } - } - \ No newline at end of file diff --git a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.metrics.v1.rs b/src/handlers/http/otel/compiled_protos/opentelemetry.proto.metrics.v1.rs deleted file mode 100644 index eb618f7cb..000000000 --- a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.metrics.v1.rs +++ /dev/null @@ -1,677 +0,0 @@ -/* - * Parseable Server (C) 2022 - 2024 Parseable, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -// This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation. - // All the repeated fields were changed to Option>. - -/// MetricsData represents the metrics data that can be stored in a persistent -/// storage, OR can be embedded by other protocols that transfer OTLP metrics -/// data but do not implement the OTLP protocol. -/// -/// MetricsData -/// └─── ResourceMetrics -/// ├── Resource -/// ├── SchemaURL -/// └── ScopeMetrics -/// ├── Scope -/// ├── SchemaURL -/// └── Metric -/// ├── Name -/// ├── Description -/// ├── Unit -/// └── data -/// ├── Gauge -/// ├── Sum -/// ├── Histogram -/// ├── ExponentialHistogram -/// └── Summary -/// -/// The main difference between this message and collector protocol is that -/// in this message there will not be any "control" or "metadata" specific to -/// OTLP protocol. -/// -/// When new fields are added into this message, the OTLP request MUST be updated -/// as well. - use crate::handlers::http::otel::proto::common::v1::InstrumentationScope; - use crate::handlers::http::otel::proto::common::v1::KeyValue; - use crate::handlers::http::otel::proto::resource::v1::Resource; - use serde::{Deserialize, Serialize}; - - #[derive(Serialize, Deserialize, Debug)] - #[serde(rename_all = "camelCase")] -pub struct MetricsData { - /// An array of ResourceMetrics. - /// For data coming from a single resource this array will typically contain - /// one element. 
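For the flag semantics documented above, the consuming side is a one-line mask; a sketch of pulling the 8-bit W3C trace flags out of `LogRecord.flags`:

```rust
// LOG_RECORD_FLAGS_TRACE_FLAGS_MASK == 255: the 8 least significant bits are
// the W3C trace flags, the 24 most significant bits are reserved.
let flags: u32 = 0x0101;
let trace_flags = flags & 255;
assert_eq!(trace_flags, 0x01); // e.g. the "sampled" flag
```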
Intermediary nodes that receive data from multiple origins - /// typically batch the data before forwarding further and in that case this - /// array will contain multiple elements. - #[serde(rename = "resourceMetrics")] - pub resource_metrics: Option>, -} -/// A collection of ScopeMetrics from a Resource. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct ResourceMetrics { - /// The resource for the metrics in this message. - /// If this field is not set then no resource info is known. - pub resource: Option, - /// A list of metrics that originate from a resource. - #[serde(rename = "scopeMetrics")] - pub scope_metrics: Option>, - /// The Schema URL, if known. This is the identifier of the Schema that the resource data - /// is recorded in. Notably, the last part of the URL path is the version number of the - /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see - /// - /// This schema_url applies to the data in the "resource" field. It does not apply - /// to the data in the "scope_metrics" field which have their own schema_url field. - #[serde(rename = "schemaUrl")] - pub schema_url: Option, -} -/// A collection of Metrics produced by an Scope. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct ScopeMetrics { - /// The instrumentation scope information for the metrics in this message. - /// Semantically when InstrumentationScope isn't set, it is equivalent with - /// an empty instrumentation scope name (unknown). - pub scope: Option, - /// A list of metrics that originate from an instrumentation library. - #[serde(rename = "metrics")] - pub metrics: Vec, - /// The Schema URL, if known. This is the identifier of the Schema that the metric data - /// is recorded in. Notably, the last part of the URL path is the version number of the - /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see - /// - /// This schema_url applies to all metrics in the "metrics" field. - #[serde(rename = "schemaUrl")] - pub schema_url: Option, -} -/// Defines a Metric which has one or more timeseries. The following is a -/// brief summary of the Metric data model. For more details, see: -/// -/// -/// -/// The data model and relation between entities is shown in the -/// diagram below. Here, "DataPoint" is the term used to refer to any -/// one of the specific data point value types, and "points" is the term used -/// to refer to any one of the lists of points contained in the Metric. -/// -/// - Metric is composed of a metadata and data. -/// - Metadata part contains a name, description, unit. -/// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). -/// - DataPoint contains timestamps, attributes, and one of the possible value type -/// fields. -/// -/// Metric -/// +------------+ -/// |name | -/// |description | -/// |unit | +------------------------------------+ -/// |data |---> |Gauge, Sum, Histogram, Summary, ... | -/// +------------+ +------------------------------------+ -/// -/// Data \[One of Gauge, Sum, Histogram, Summary, ...\] -/// +-----------+ -/// |... | // Metadata about the Data. -/// |points |--+ -/// +-----------+ | -/// | +---------------------------+ -/// | |DataPoint 1 | -/// v |+------+------+ +------+ | -/// +-----+ ||label |label |...|label | | -/// | 1 |-->||value1|value2|...|valueN| | -/// +-----+ |+------+------+ +------+ | -/// | . | |+-----+ | -/// | . | ||value| | -/// | . | |+-----+ | -/// | . 
| +---------------------------+ -/// | . | . -/// | . | . -/// | . | . -/// | . | +---------------------------+ -/// | . | |DataPoint M | -/// +-----+ |+------+------+ +------+ | -/// | M |-->||label |label |...|label | | -/// +-----+ ||value1|value2|...|valueN| | -/// |+------+------+ +------+ | -/// |+-----+ | -/// ||value| | -/// |+-----+ | -/// +---------------------------+ -/// -/// Each distinct type of DataPoint represents the output of a specific -/// aggregation function, the result of applying the DataPoint's -/// associated function of to one or more measurements. -/// -/// All DataPoint types have three common fields: -/// - Attributes includes key-value pairs associated with the data point -/// - TimeUnixNano is required, set to the end time of the aggregation -/// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints -/// having an AggregationTemporality field, as discussed below. -/// -/// Both TimeUnixNano and StartTimeUnixNano values are expressed as -/// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. -/// -/// # TimeUnixNano -/// -/// This field is required, having consistent interpretation across -/// DataPoint types. TimeUnixNano is the moment corresponding to when -/// the data point's aggregate value was captured. -/// -/// Data points with the 0 value for TimeUnixNano SHOULD be rejected -/// by consumers. -/// -/// # StartTimeUnixNano -/// -/// StartTimeUnixNano in general allows detecting when a sequence of -/// observations is unbroken. This field indicates to consumers the -/// start time for points with cumulative and delta -/// AggregationTemporality, and it should be included whenever possible -/// to support correct rate calculation. Although it may be omitted -/// when the start time is truly unknown, setting StartTimeUnixNano is -/// strongly encouraged. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct Metric { - /// name of the metric. - pub name: Option, - /// description of the metric, which can be used in documentation. - pub description: Option, - /// unit in which the metric value is reported. Follows the format - /// described by - pub unit: Option, - /// Additional metadata attributes that describe the metric. \[Optional\]. - /// Attributes are non-identifying. - /// Consumers SHOULD NOT need to be aware of these attributes. - /// These attributes MAY be used to encode information allowing - /// for lossless roundtrip translation to / from another data model. - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub metadata: Option>, - /// Data determines the aggregation type (if any) of the metric, what is the - /// reported value type for the data points, as well as the relatationship to - /// the time interval over which they are reported. - pub gauge: Option, - pub sum: Option, - pub histogram: Option, - pub exponential_histogram: Option, - pub summary: Option, -} -/// Gauge represents the type of a scalar metric that always exports the -/// "current value" for every data point. It should be used for an "unknown" -/// aggregation. -/// -/// A Gauge does not support different aggregation temporalities. Given the -/// aggregation is unknown, points cannot be combined using the same -/// aggregation, regardless of aggregation temporalities. Therefore, -/// AggregationTemporality is not included. Consequently, this also means -/// "StartTimeUnixNano" is ignored for all data points. 
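The hand-edited `Metric` above models the protobuf `oneof` as five independent `Option` fields, so nothing prevents, say, `gauge` and `sum` from both being `Some`. With `opentelemetry-proto` the same choice is a single generated enum; a sketch assuming the `metric::Data` layout:

```rust
use opentelemetry_proto::tonic::metrics::v1::{metric, Metric};

// Exactly one data kind (or none) per metric, enforced by the type.
fn data_point_kind(m: &Metric) -> &'static str {
    match &m.data {
        Some(metric::Data::Gauge(_)) => "gauge",
        Some(metric::Data::Sum(_)) => "sum",
        Some(metric::Data::Histogram(_)) => "histogram",
        Some(metric::Data::ExponentialHistogram(_)) => "exponential_histogram",
        Some(metric::Data::Summary(_)) => "summary",
        None => "empty",
    }
}
```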
-#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct Gauge { - pub data_points: Option>, -} -/// Sum represents the type of a scalar metric that is calculated as a sum of all -/// reported measurements over a time interval. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct Sum { - pub data_points: Option>, - /// aggregation_temporality describes if the aggregator reports delta changes - /// since last report time, or cumulative changes since a fixed start time. - pub aggregation_temporality: Option, - /// If "true" means that the sum is monotonic. - pub is_monotonic: Option, -} -/// Histogram represents the type of a metric that is calculated by aggregating -/// as a Histogram of all reported measurements over a time interval. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct Histogram { - - pub data_points: Option>, - /// aggregation_temporality describes if the aggregator reports delta changes - /// since last report time, or cumulative changes since a fixed start time. - pub aggregation_temporality: Option, -} -/// ExponentialHistogram represents the type of a metric that is calculated by aggregating -/// as a ExponentialHistogram of all reported double measurements over a time interval. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct ExponentialHistogram { - pub data_points: Option>, - /// aggregation_temporality describes if the aggregator reports delta changes - /// since last report time, or cumulative changes since a fixed start time. - pub aggregation_temporality: Option, -} -/// Summary metric data are used to convey quantile summaries, -/// a Prometheus (see: ) -/// and OpenMetrics (see: ) -/// data type. These data points cannot always be merged in a meaningful way. -/// While they can be useful in some applications, histogram data points are -/// recommended for new applications. -/// Summary metrics do not have an aggregation temporality field. This is -/// because the count and sum fields of a SummaryDataPoint are assumed to be -/// cumulative values. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct Summary { - pub data_points: Option>, -} -/// NumberDataPoint is a single data point in a timeseries that describes the -/// time-varying scalar value of a metric. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct NumberDataPoint { - /// The set of key/value pairs that uniquely identify the timeseries from - /// where this point belongs. The list may be empty (may contain 0 elements). - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - /// StartTimeUnixNano is optional but strongly encouraged, see the - /// the detailed comments above Metric. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub start_time_unix_nano: Option, - /// TimeUnixNano is required, see the detailed comments above Metric. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub time_unix_nano: Option, - /// (Optional) List of exemplars collected from - /// measurements that were used to form the data point - pub exemplars: Option>, - /// Flags that apply to this specific data point. See DataPointFlags - /// for the available flags and their meaning. 
- pub flags: Option, - /// The value itself. A point is considered invalid when one of the recognized - /// value fields is not present inside this oneof. - pub as_double: Option, - pub as_int: Option, -} -/// HistogramDataPoint is a single data point in a timeseries that describes the -/// time-varying values of a Histogram. A Histogram contains summary statistics -/// for a population of values, it may optionally contain the distribution of -/// those values across a set of buckets. -/// -/// If the histogram contains the distribution of values, then both -/// "explicit_bounds" and "bucket counts" fields must be defined. -/// If the histogram does not contain the distribution of values, then both -/// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and -/// "sum" are known. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct HistogramDataPoint { - /// The set of key/value pairs that uniquely identify the timeseries from - /// where this point belongs. The list may be empty (may contain 0 elements). - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - /// StartTimeUnixNano is optional but strongly encouraged, see the - /// the detailed comments above Metric. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub start_time_unix_nano: Option, - /// TimeUnixNano is required, see the detailed comments above Metric. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub time_unix_nano: Option, - /// count is the number of values in the population. Must be non-negative. This - /// value must be equal to the sum of the "count" fields in buckets if a - /// histogram is provided. - pub count: Option, - /// sum of the values in the population. If count is zero then this field - /// must be zero. - /// - /// Note: Sum should only be filled out when measuring non-negative discrete - /// events, and is assumed to be monotonic over the values of these events. - /// Negative events *can* be recorded, but sum should not be filled out when - /// doing so. This is specifically to enforce compatibility w/ OpenMetrics, - /// see: - pub sum: Option, - /// bucket_counts is an optional field contains the count values of histogram - /// for each bucket. - /// - /// The sum of the bucket_counts must equal the value in the count field. - /// - /// The number of elements in bucket_counts array must be by one greater than - /// the number of elements in explicit_bounds array. - pub bucket_counts: Option>, - /// explicit_bounds specifies buckets with explicitly defined bounds for values. - /// - /// The boundaries for bucket at index i are: - /// - /// (-infinity, explicit_bounds\[i]\] for i == 0 - /// (explicit_bounds\[i-1\], explicit_bounds\[i]\] for 0 < i < size(explicit_bounds) - /// (explicit_bounds\[i-1\], +infinity) for i == size(explicit_bounds) - /// - /// The values in the explicit_bounds array must be strictly increasing. - /// - /// Histogram buckets are inclusive of their upper boundary, except the last - /// bucket where the boundary is at infinity. This format is intentionally - /// compatible with the OpenMetrics histogram definition. - pub explicit_bounds: Option>, - /// (Optional) List of exemplars collected from - /// measurements that were used to form the data point - pub exemplars: Option>, - /// Flags that apply to this specific data point. 
See DataPointFlags - /// for the available flags and their meaning. - pub flags: Option, - /// min is the minimum value over (start_time, end_time]. - pub min: Option, - /// max is the maximum value over (start_time, end_time]. - pub max: Option, -} -/// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the -/// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains -/// summary statistics for a population of values, it may optionally contain the -/// distribution of those values across a set of buckets. -/// -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct ExponentialHistogramDataPoint { - /// The set of key/value pairs that uniquely identify the timeseries from - /// where this point belongs. The list may be empty (may contain 0 elements). - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - /// StartTimeUnixNano is optional but strongly encouraged, see the - /// the detailed comments above Metric. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub start_time_unix_nano: Option, - /// TimeUnixNano is required, see the detailed comments above Metric. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub time_unix_nano: Option, - /// count is the number of values in the population. Must be - /// non-negative. This value must be equal to the sum of the "bucket_counts" - /// values in the positive and negative Buckets plus the "zero_count" field. - pub count: Option, - /// sum of the values in the population. If count is zero then this field - /// must be zero. - /// - /// Note: Sum should only be filled out when measuring non-negative discrete - /// events, and is assumed to be monotonic over the values of these events. - /// Negative events *can* be recorded, but sum should not be filled out when - /// doing so. This is specifically to enforce compatibility w/ OpenMetrics, - /// see: - pub sum: Option, - /// scale describes the resolution of the histogram. Boundaries are - /// located at powers of the base, where: - /// - /// base = (2^(2^-scale)) - /// - /// The histogram bucket identified by `index`, a signed integer, - /// contains values that are greater than (base^index) and - /// less than or equal to (base^(index+1)). - /// - /// The positive and negative ranges of the histogram are expressed - /// separately. Negative values are mapped by their absolute value - /// into the negative range using the same scale as the positive range. - /// - /// scale is not restricted by the protocol, as the permissible - /// values depend on the range of the data. - pub scale: Option, - /// zero_count is the count of values that are either exactly zero or - /// within the region considered zero by the instrumentation at the - /// tolerated degree of precision. This bucket stores values that - /// cannot be expressed using the standard exponential formula as - /// well as values that have been rounded to zero. - /// - /// Implementations MAY consider the zero bucket to have probability - /// mass equal to (zero_count / count). - pub zero_count: Option, - /// positive carries the positive range of exponential bucket counts. - pub positive: Option, - /// negative carries the negative range of exponential bucket counts. - pub negative: Option, - /// Flags that apply to this specific data point. 
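The scale/base relationship documented above deserves a worked number; a small sketch of `base = 2^(2^-scale)` and the `(base^index, base^(index+1)]` bucket bounds:

```rust
// For scale = 3: base = 2^(2^-3) = 2^0.125 ≈ 1.0905.
let scale: i32 = 3;
let base = 2f64.powf(2f64.powi(-scale));

// Bucket `index` covers (base^index, base^(index+1)]; for index = 10 that is
// roughly (2.378, 2.594], i.e. (2^1.25, 2^1.375].
let index: i32 = 10;
let (lower, upper) = (base.powi(index), base.powi(index + 1));
assert!(lower < upper && (base - 1.0905).abs() < 1e-4);
```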
See DataPointFlags - /// for the available flags and their meaning. - pub flags: Option, - /// (Optional) List of exemplars collected from - /// measurements that were used to form the data point - pub exemplars: Option>, - /// min is the minimum value over (start_time, end_time]. - pub min: Option, - /// max is the maximum value over (start_time, end_time]. - pub max: Option, - /// ZeroThreshold may be optionally set to convey the width of the zero - /// region. Where the zero region is defined as the closed interval - /// \[-ZeroThreshold, ZeroThreshold\]. - /// When ZeroThreshold is 0, zero count bucket stores values that cannot be - /// expressed using the standard exponential formula as well as values that - /// have been rounded to zero. - pub zero_threshold: Option, -} -/// Nested message and enum types in `ExponentialHistogramDataPoint`. -pub mod exponential_histogram_data_point { - use serde::{Deserialize, Serialize}; - /// Buckets are a set of bucket counts, encoded in a contiguous array - /// of counts. - #[derive(Serialize, Deserialize, Debug)] - #[serde(rename_all = "camelCase")] - pub struct Buckets { - /// Offset is the bucket index of the first entry in the bucket_counts array. - /// - /// Note: This uses a varint encoding as a simple form of compression. - pub offset: Option, - /// bucket_counts is an array of count values, where bucket_counts\[i\] carries - /// the count of the bucket at index (offset+i). bucket_counts\[i\] is the count - /// of values greater than base^(offset+i) and less than or equal to - /// base^(offset+i+1). - /// - /// Note: By contrast, the explicit HistogramDataPoint uses - /// fixed64. This field is expected to have many buckets, - /// especially zeros, so uint64 has been selected to ensure - /// varint encoding. - pub bucket_counts: Option>, - } -} -/// SummaryDataPoint is a single data point in a timeseries that describes the -/// time-varying values of a Summary metric. The count and sum fields represent -/// cumulative values. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct SummaryDataPoint { - /// The set of key/value pairs that uniquely identify the timeseries from - /// where this point belongs. The list may be empty (may contain 0 elements). - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - /// StartTimeUnixNano is optional but strongly encouraged, see the - /// the detailed comments above Metric. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub start_time_unix_nano: Option, - /// TimeUnixNano is required, see the detailed comments above Metric. - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub time_unix_nano: Option, - /// count is the number of values in the population. Must be non-negative. - pub count: Option, - /// sum of the values in the population. If count is zero then this field - /// must be zero. - /// - /// Note: Sum should only be filled out when measuring non-negative discrete - /// events, and is assumed to be monotonic over the values of these events. - /// Negative events *can* be recorded, but sum should not be filled out when - /// doing so. This is specifically to enforce compatibility w/ OpenMetrics, - /// see: - pub sum: Option, - /// (Optional) list of values at different quantiles of the distribution calculated - /// from the current snapshot. 
The quantiles must be strictly increasing. - pub quantile_values: Option>, - /// Flags that apply to this specific data point. See DataPointFlags - /// for the available flags and their meaning. - pub flags: Option, -} -/// Nested message and enum types in `SummaryDataPoint`. -pub mod summary_data_point { - use serde::{Deserialize, Deserializer, Serialize}; - /// Represents the value at a given quantile of a distribution. - /// - /// To record Min and Max values following conventions are used: - /// - The 1.0 quantile is equivalent to the maximum value observed. - /// - The 0.0 quantile is equivalent to the minimum value observed. - /// - /// See the following issue for more context: - /// - #[derive(Serialize, Deserialize, Debug)] - #[serde(rename_all = "camelCase")] - pub struct ValueAtQuantile { - /// The quantile of a distribution. Must be in the interval - /// \[0.0, 1.0\]. - pub quantile: Option, - /// The value at the given quantile of a distribution. - /// - /// Quantile values must NOT be negative. - #[serde(deserialize_with = "deserialize_f64_or_nan")] - pub value: Option, - } - - fn deserialize_f64_or_nan<'de, D>(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, - { - struct StringOrFloatVisitor; - impl serde::de::Visitor<'_> for StringOrFloatVisitor - { - type Value = Option; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result - { - formatter.write_str("a string or a floating-point number") - } - fn visit_str(self, value: &str) -> Result where E: serde::de::Error, - { - if value == "NaN" - { - Ok(Some(f64::NAN)) - } else { - value.parse::().map(Some).map_err(E::custom) - } } - fn visit_f64(self, value: f64) -> Result where E: serde::de::Error, - { - Ok(Some(value)) - } - } - deserializer.deserialize_any(StringOrFloatVisitor) } -} -/// A representation of an exemplar, which is a sample input measurement. -/// Exemplars also hold information about the environment when the measurement -/// was recorded, for example the span and trace ID of the active span when the -/// exemplar was recorded. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct Exemplar { - /// The set of key/value pairs that were filtered out by the aggregator, but - /// recorded alongside the original measurement. Only key/value pairs that were - /// filtered out by the aggregator should be included - pub filtered_attributes: Option>, - /// time_unix_nano is the exact time when this exemplar was recorded - /// - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - /// 1970. - pub time_unix_nano: Option, - /// (Optional) Span ID of the exemplar trace. - /// span_id may be missing if the measurement is not recorded inside a trace - /// or if the trace is not sampled. - pub span_id: Option, - /// (Optional) Trace ID of the exemplar trace. - /// trace_id may be missing if the measurement is not recorded inside a trace - /// or if the trace is not sampled. - pub trace_id: Option, - /// The value of the measurement that was recorded. An exemplar is - /// considered invalid when one of the recognized value fields is not present - /// inside this oneof. - pub as_double: Option, - pub as_int: Option, -} -/// AggregationTemporality defines how a metric aggregator reports aggregated -/// values. It describes how those values relate to the time interval over -/// which they are aggregated. 
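A note on the `deserialize_f64_or_nan` helper being deleted above: it existed because OTLP/JSON encoders can emit the literal string `"NaN"` for a quantile value, which stock f64 deserialization rejects. If this path is ever rebuilt on the generated types, the same fallback is easy to keep; a minimal sketch:

```rust
use serde_json::Value;

// Accept a JSON number, a numeric string, or the literal "NaN".
fn f64_or_nan(v: &Value) -> Option<f64> {
    match v {
        Value::Number(n) => n.as_f64(),
        Value::String(s) if s == "NaN" => Some(f64::NAN),
        Value::String(s) => s.parse().ok(),
        _ => None,
    }
}
```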
-#[repr(i32)] -pub enum AggregationTemporality { - Unspecified = 0, - Delta = 1, - Cumulative = 2, -} -impl AggregationTemporality { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unspecified => "AGGREGATION_TEMPORALITY_UNSPECIFIED", - Self::Delta => "AGGREGATION_TEMPORALITY_DELTA", - Self::Cumulative => "AGGREGATION_TEMPORALITY_CUMULATIVE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "AGGREGATION_TEMPORALITY_UNSPECIFIED" => Some(Self::Unspecified), - "AGGREGATION_TEMPORALITY_DELTA" => Some(Self::Delta), - "AGGREGATION_TEMPORALITY_CUMULATIVE" => Some(Self::Cumulative), - _ => None, - } - } -} -/// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a -/// bit-field representing 32 distinct boolean flags. Each flag defined in this -/// enum is a bit-mask. To test the presence of a single flag in the flags of -/// a data point, for example, use an expression like: -/// -/// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK -/// -#[repr(i32)] -pub enum DataPointFlags { - /// The zero value for the enum. Should not be used for comparisons. - /// Instead use bitwise "and" with the appropriate mask as shown above. - DoNotUse = 0, - /// This DataPoint is valid but has no recorded value. This value - /// SHOULD be used to reflect explicitly missing data in a series, as - /// for an equivalent to the Prometheus "staleness marker". - NoRecordedValueMask = 1, -} -impl DataPointFlags { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::DoNotUse => "DATA_POINT_FLAGS_DO_NOT_USE", - Self::NoRecordedValueMask => "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "DATA_POINT_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse), - "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK" => Some(Self::NoRecordedValueMask), - _ => None, - } - } -} diff --git a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.resource.v1.rs b/src/handlers/http/otel/compiled_protos/opentelemetry.proto.resource.v1.rs deleted file mode 100644 index 2f102628a..000000000 --- a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.resource.v1.rs +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Parseable Server (C) 2022 - 2024 Parseable, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. 
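The staleness-marker test documented above carries over to the generated types unchanged; a sketch (variant name assumed to follow prost's prefix-stripping convention, matching the hand-written enum deleted here):

```rust
use opentelemetry_proto::tonic::metrics::v1::DataPointFlags;

// A point with this bit set is present in the series but has no recorded
// value - the Prometheus "staleness marker" equivalent.
let flags: u32 = 1;
let no_recorded_value = flags & DataPointFlags::NoRecordedValueMask as u32 != 0;
assert!(no_recorded_value);
```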
- * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ -// This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation. -// All the repeated fields were changed to Option> - -use crate::handlers::http::otel::proto::common::v1::KeyValue; -use serde::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize, Debug)] -/// Resource information. -pub struct Resource { - /// Set of attributes that describe the resource. - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - #[serde(rename = "attributes")] - pub attributes: Option>, - /// dropped_attributes_count is the number of dropped attributes. If the value is 0, then - /// no attributes were dropped. - #[serde(rename = "droppedAttributesCount")] - pub dropped_attributes_count: Option, -} diff --git a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.trace.v1.rs b/src/handlers/http/otel/compiled_protos/opentelemetry.proto.trace.v1.rs deleted file mode 100644 index ec8cd3d39..000000000 --- a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.trace.v1.rs +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Parseable Server (C) 2022 - 2024 Parseable, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -// This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation. - // All the repeated fields were changed to Option>. - /// TracesData represents the traces data that can be stored in a persistent storage, -/// OR can be embedded by other protocols that transfer OTLP traces data but do -/// not implement the OTLP protocol. -/// -/// The main difference between this message and collector protocol is that -/// in this message there will not be any "control" or "metadata" specific to -/// OTLP protocol. -/// -/// When new fields are added into this message, the OTLP request MUST be updated -/// as well. - use serde::{Deserialize, Serialize}; - - #[derive(Serialize, Deserialize, Debug)] - #[serde(rename_all = "camelCase")] -pub struct TracesData { - /// An array of ResourceSpans. - /// For data coming from a single resource this array will typically contain - /// one element. Intermediary nodes that receive data from multiple origins - /// typically batch the data before forwarding further and in that case this - /// array will contain multiple elements. - pub resource_spans: Option>, -} -/// A collection of ScopeSpans from a Resource. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct ResourceSpans { - /// The resource for the spans in this message. - /// If this field is not set then no resource info is known. - pub resource: Option, - /// A list of ScopeSpans that originate from a resource. - pub scope_spans: Option>, - /// The Schema URL, if known. 
This is the identifier of the Schema that the resource data - /// is recorded in. Notably, the last part of the URL path is the version number of the - /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see - /// - /// This schema_url applies to the data in the "resource" field. It does not apply - /// to the data in the "scope_spans" field which have their own schema_url field. - pub schema_url: Option, -} -/// A collection of Spans produced by an InstrumentationScope. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct ScopeSpans { - /// The instrumentation scope information for the spans in this message. - /// Semantically when InstrumentationScope isn't set, it is equivalent with - /// an empty instrumentation scope name (unknown). - pub scope: Option, - /// A list of Spans that originate from an instrumentation scope. - pub spans: Option>, - /// The Schema URL, if known. This is the identifier of the Schema that the span data - /// is recorded in. Notably, the last part of the URL path is the version number of the - /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see - /// - /// This schema_url applies to all spans and span events in the "spans" field. - pub schema_url: Option, -} -/// A Span represents a single operation performed by a single component of the system. -/// -/// The next available field id is 17. -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct Span { - /// A unique identifier for a trace. All spans from the same trace share - /// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - /// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - /// is zero-length and thus is also invalid). - /// - /// This field is required. - pub trace_id: Option, - /// A unique identifier for a span within a trace, assigned when the span - /// is created. The ID is an 8-byte array. An ID with all zeroes OR of length - /// other than 8 bytes is considered invalid (empty string in OTLP/JSON - /// is zero-length and thus is also invalid). - /// - /// This field is required. - pub span_id: Option, - /// trace_state conveys information about request position in multiple distributed tracing graphs. - /// It is a trace_state in w3c-trace-context format: - /// See also for more details about this field. - pub trace_state: Option, - /// The `span_id` of this span's parent span. If this is a root span, then this - /// field must be empty. The ID is an 8-byte array. - pub parent_span_id: Option, - /// Flags, a bit field. - /// - /// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - /// Context specification. To read the 8-bit W3C trace flag, use - /// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - /// - /// See for the flag definitions. - /// - /// Bits 8 and 9 represent the 3 states of whether a span's parent - /// is remote. The states are (unknown, is not remote, is remote). - /// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - /// To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - /// - /// When creating span messages, if the message is logically forwarded from another source - /// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - /// be copied as-is. 
If creating from a source that does not have an equivalent flags field - /// (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - /// be set to zero. - /// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - /// - /// \[Optional\]. - pub flags: Option, - /// A description of the span's operation. - /// - /// For example, the name can be a qualified method name or a file name - /// and a line number where the operation is called. A best practice is to use - /// the same display name at the same call point in an application. - /// This makes it easier to correlate spans in different traces. - /// - /// This field is semantically required to be set to non-empty string. - /// Empty value is equivalent to an unknown span name. - /// - /// This field is required. - pub name: Option, - /// Distinguishes between spans generated in a particular context. For example, - /// two spans with the same name may be distinguished using `CLIENT` (caller) - /// and `SERVER` (callee) to identify queueing latency associated with the span. - pub kind: Option, - /// start_time_unix_nano is the start time of the span. On the client side, this is the time - /// kept by the local machine where the span execution starts. On the server side, this - /// is the time when the server's application handler starts running. - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - /// - /// This field is semantically required and it is expected that end_time >= start_time. - pub start_time_unix_nano: Option, - /// end_time_unix_nano is the end time of the span. On the client side, this is the time - /// kept by the local machine where the span execution ends. On the server side, this - /// is the time when the server application handler stops running. - /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - /// - /// This field is semantically required and it is expected that end_time >= start_time. - pub end_time_unix_nano: Option, - /// attributes is a collection of key/value pairs. Note, global attributes - /// like server name can be set using the resource API. Examples of attributes: - /// - /// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - /// "/http/server_latency": 300 - /// "example.com/myattribute": true - /// "example.com/score": 10.239 - /// - /// The OpenTelemetry API specification further restricts the allowed value types: - /// - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - /// dropped_attributes_count is the number of attributes that were discarded. Attributes - /// can be discarded because their keys are too long or because there are too many - /// attributes. If this value is 0, then no attributes were dropped. - pub dropped_attributes_count: Option, - /// events is a collection of Event items. - pub events: Option>, - /// dropped_events_count is the number of dropped events. If the value is 0, then no - /// events were dropped. - pub dropped_events_count: Option, - /// links is a collection of Links, which are references from this span to a span - /// in the same or different trace. - pub links: Option>, - /// dropped_links_count is the number of dropped links after the maximum size was - /// enforced. If this value is 0, then no links were dropped. 
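Span.flags, documented above, packs the 8-bit W3C trace flags and a tri-state "parent is remote" indicator into a single u32. A short sketch of unpacking it, assuming the SpanFlags mask constants from the opentelemetry-proto crate; decode_span_flags is a hypothetical helper, not part of the diff:

use opentelemetry_proto::tonic::trace::v1::SpanFlags;

// Hypothetical helper: returns the W3C trace flags (bits 0-7) and, when known,
// whether the span's parent is remote (bits 8-9).
fn decode_span_flags(flags: u32) -> (u8, Option<bool>) {
    let trace_flags = (flags & SpanFlags::TraceFlagsMask as u32) as u8;
    let is_remote = if flags & SpanFlags::ContextHasIsRemoteMask as u32 != 0 {
        Some(flags & SpanFlags::ContextIsRemoteMask as u32 != 0)
    } else {
        None // remoteness was never recorded by the producer
    };
    (trace_flags, is_remote)
}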
- pub dropped_links_count: Option, - /// An optional final status for this span. Semantically when Status isn't set, it means - /// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). - pub status: Option, -} -/// Nested message and enum types in `Span`. -pub mod span { - use serde::{Deserialize, Serialize}; - /// Event is a time-stamped annotation of the span, consisting of user-supplied - /// text description and key-value pairs. - #[derive(Serialize, Deserialize, Debug)] - pub struct Event { - /// time_unix_nano is the time the event occurred. - pub time_unix_nano: Option, - /// name of the event. - /// This field is semantically required to be set to non-empty string. - pub name: Option, - /// attributes is a collection of attribute key/value pairs on the event. - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - /// dropped_attributes_count is the number of dropped attributes. If the value is 0, - /// then no attributes were dropped. - pub dropped_attributes_count: Option, - } - /// A pointer from the current span to another span in the same trace or in a - /// different trace. For example, this can be used in batching operations, - /// where a single batch handler processes multiple requests from different - /// traces or when the handler receives a request from a different project. - #[derive(Serialize, Deserialize, Debug)] - pub struct Link { - /// A unique identifier of a trace that this linked span is part of. The ID is a - /// 16-byte array. - pub trace_id: Option, - /// A unique identifier for the linked span. The ID is an 8-byte array. - pub span_id: Option, - /// The trace_state associated with the link. - pub trace_state: Option, - /// attributes is a collection of attribute key/value pairs on the link. - /// Attribute keys MUST be unique (it is not allowed to have more than one - /// attribute with the same key). - pub attributes: Option>, - /// dropped_attributes_count is the number of dropped attributes. If the value is 0, - /// then no attributes were dropped. - pub dropped_attributes_count: Option, - /// Flags, a bit field. - /// - /// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - /// Context specification. To read the 8-bit W3C trace flag, use - /// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - /// - /// See for the flag definitions. - /// - /// Bits 8 and 9 represent the 3 states of whether the link is remote. - /// The states are (unknown, is not remote, is remote). - /// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - /// To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - /// - /// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - /// When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - /// - /// \[Optional\]. - pub flags: Option, - } - /// SpanKind is the type of span. Can be used to specify additional relationships between spans - /// in addition to a parent/child relationship. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum SpanKind { - /// Unspecified. Do NOT use as default. - /// Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. 
- Unspecified = 0, - /// Indicates that the span represents an internal operation within an application, - /// as opposed to an operation happening at the boundaries. Default value. - Internal = 1, - /// Indicates that the span covers server-side handling of an RPC or other - /// remote network request. - Server = 2, - /// Indicates that the span describes a request to some remote service. - Client = 3, - /// Indicates that the span describes a producer sending a message to a broker. - /// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - /// between producer and consumer spans. A PRODUCER span ends when the message was accepted - /// by the broker while the logical processing of the message might span a much longer time. - Producer = 4, - /// Indicates that the span describes consumer receiving a message from a broker. - /// Like the PRODUCER kind, there is often no direct critical path latency relationship - /// between producer and consumer spans. - Consumer = 5, - } - impl SpanKind { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unspecified => "SPAN_KIND_UNSPECIFIED", - Self::Internal => "SPAN_KIND_INTERNAL", - Self::Server => "SPAN_KIND_SERVER", - Self::Client => "SPAN_KIND_CLIENT", - Self::Producer => "SPAN_KIND_PRODUCER", - Self::Consumer => "SPAN_KIND_CONSUMER", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "SPAN_KIND_UNSPECIFIED" => Some(Self::Unspecified), - "SPAN_KIND_INTERNAL" => Some(Self::Internal), - "SPAN_KIND_SERVER" => Some(Self::Server), - "SPAN_KIND_CLIENT" => Some(Self::Client), - "SPAN_KIND_PRODUCER" => Some(Self::Producer), - "SPAN_KIND_CONSUMER" => Some(Self::Consumer), - _ => None, - } - } - } -} -/// The Status type defines a logical error model that is suitable for different -/// programming environments, including REST APIs and RPC APIs. -#[derive(Serialize, Deserialize, Debug)] -pub struct Status { - /// A developer-facing human readable error message. - pub message: Option, - /// The status code. - pub code: Option, -} -/// Nested message and enum types in `Status`. -pub mod status { - /// For the semantics of status codes see - /// - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum StatusCode { - /// The default status. - Unset = 0, - /// The Span has been validated by an Application developer or Operator to - /// have completed successfully. - Ok = 1, - /// The Span contains an error. - Error = 2, - } - impl StatusCode { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unset => "STATUS_CODE_UNSET", - Self::Ok => "STATUS_CODE_OK", - Self::Error => "STATUS_CODE_ERROR", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "STATUS_CODE_UNSET" => Some(Self::Unset), - "STATUS_CODE_OK" => Some(Self::Ok), - "STATUS_CODE_ERROR" => Some(Self::Error), - _ => None, - } - } - } -} -/// SpanFlags represents constants used to interpret the -/// Span.flags field, which is protobuf 'fixed32' type and is to -/// be used as bit-fields. Each non-zero value defined in this enum is -/// a bit-mask. To extract the bit-field, for example, use an -/// expression like: -/// -/// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) -/// -/// See for the flag definitions. -/// -/// Note that Span flags were introduced in version 1.1 of the -/// OpenTelemetry protocol. Older Span producers do not set this -/// field, consequently consumers should not rely on the absence of a -/// particular flag bit to indicate the presence of a particular feature. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum SpanFlags { - /// The zero value for the enum. Should not be used for comparisons. - /// Instead use bitwise "and" with the appropriate mask as shown above. - DoNotUse = 0, - /// Bits 0-7 are used for trace flags. - TraceFlagsMask = 255, - /// Bits 8 and 9 are used to indicate that the parent span or link span is remote. - /// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - /// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. - ContextHasIsRemoteMask = 256, - ContextIsRemoteMask = 512, -} -impl SpanFlags { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::DoNotUse => "SPAN_FLAGS_DO_NOT_USE", - Self::TraceFlagsMask => "SPAN_FLAGS_TRACE_FLAGS_MASK", - Self::ContextHasIsRemoteMask => "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", - Self::ContextIsRemoteMask => "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "SPAN_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse), - "SPAN_FLAGS_TRACE_FLAGS_MASK" => Some(Self::TraceFlagsMask), - "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK" => Some(Self::ContextHasIsRemoteMask), - "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK" => Some(Self::ContextIsRemoteMask), - _ => None, - } - } -} diff --git a/src/handlers/http/otel/logs.rs b/src/handlers/http/otel/logs.rs index e6a7d9059..3eef4c972 100644 --- a/src/handlers/http/otel/logs.rs +++ b/src/handlers/http/otel/logs.rs @@ -16,76 +16,50 @@ * */ -use std::collections::BTreeMap; - -use crate::handlers::http::otel::proto::logs::v1::LogRecord; -use crate::handlers::http::otel::proto::logs::v1::LogRecordFlags; -use crate::handlers::http::otel::proto::logs::v1::LogsData; -use crate::handlers::http::otel::proto::logs::v1::SeverityNumber; use bytes::Bytes; +use opentelemetry_proto::tonic::logs::v1::LogRecord; +use opentelemetry_proto::tonic::logs::v1::LogsData; +use opentelemetry_proto::tonic::logs::v1::ScopeLogs; +use opentelemetry_proto::tonic::logs::v1::SeverityNumber; use serde_json::Value; +use std::collections::BTreeMap; use super::collect_json_from_values; use super::insert_attributes; -use super::insert_if_some; -use super::insert_number_if_some; -use super::proto::logs::v1::ScopeLogs; /// otel log event has severity number /// there is a mapping of severity number to severity text provided in proto /// this function fetches the severity text from the severity number /// and adds it to the flattened json -fn flatten_severity(severity_number: &Option) -> BTreeMap { +fn flatten_severity(severity_number: i32) -> BTreeMap { let mut severity_json: BTreeMap = BTreeMap::new(); - insert_number_if_some( - &mut severity_json, - "severity_number", - &severity_number.map(|f| f as f64), + severity_json.insert( + "severity_number".to_string(), + Value::Number(severity_number.into()), + ); + let severity = SeverityNumber::try_from(severity_number).unwrap_or(SeverityNumber::Unspecified); + severity_json.insert( + "severity_text".to_string(), + Value::String(severity.as_str_name().to_string()), ); - if let Some(severity_number) = severity_number { - insert_if_some( - &mut severity_json, - "severity_text", - &Some(SeverityNumber::as_str_name(*severity_number)), - ); - } severity_json } -/// otel log event has flags -/// there is a mapping of flags to flags text provided in proto -/// this function fetches the flags text from the flags -/// and adds it to the flattened json -fn flatten_flags(flags: &Option) -> BTreeMap { - let mut flags_json: BTreeMap = BTreeMap::new(); - insert_number_if_some(&mut flags_json, "flags_number", &flags.map(|f| f as f64)); - if let Some(flags) = flags { - insert_if_some( - &mut flags_json, - "flags_string", - &Some(LogRecordFlags::as_str_name(*flags)), - ); - } - flags_json -} - /// this function flattens the `LogRecord` object /// and returns a `BTreeMap` of the flattened json /// this function is called recursively for each log record object in the otel logs pub fn flatten_log_record(log_record: &LogRecord) -> BTreeMap { let mut log_record_json: BTreeMap = BTreeMap::new(); - insert_if_some( - &mut log_record_json, - "time_unix_nano", - &log_record.time_unix_nano, + log_record_json.insert( + "time_unix_nano".to_string(), + Value::Number(log_record.time_unix_nano.into()), ); - insert_if_some( - &mut log_record_json, - "observed_time_unix_nano", - &log_record.observed_time_unix_nano, + log_record_json.insert( + "observed_time_unix_nano".to_string(), + 
Value::Number(log_record.observed_time_unix_nano.into()), ); - log_record_json.extend(flatten_severity(&log_record.severity_number)); + log_record_json.extend(flatten_severity(log_record.severity_number)); if log_record.body.is_some() { let body = &log_record.body; @@ -95,15 +69,23 @@ pub fn flatten_log_record(log_record: &LogRecord) -> BTreeMap { } } insert_attributes(&mut log_record_json, &log_record.attributes); - insert_number_if_some( - &mut log_record_json, - "log_record_dropped_attributes_count", - &log_record.dropped_attributes_count.map(|f| f as f64), + log_record_json.insert( + "log_record_dropped_attributes_count".to_string(), + Value::Number(log_record.dropped_attributes_count.into()), ); - log_record_json.extend(flatten_flags(&log_record.flags)); - insert_if_some(&mut log_record_json, "span_id", &log_record.span_id); - insert_if_some(&mut log_record_json, "trace_id", &log_record.trace_id); + log_record_json.insert( + "flags".to_string(), + Value::Number((log_record.flags).into()), + ); + log_record_json.insert( + "span_id".to_string(), + Value::String(hex::encode(&log_record.span_id)), + ); + log_record_json.insert( + "trace_id".to_string(), + Value::String(hex::encode(&log_record.trace_id)), + ); log_record_json } @@ -115,22 +97,21 @@ fn flatten_scope_log(scope_log: &ScopeLogs) -> Vec> { let mut scope_log_json = BTreeMap::new(); if let Some(scope) = &scope_log.scope { - insert_if_some(&mut scope_log_json, "scope_name", &scope.name); - insert_if_some(&mut scope_log_json, "scope_version", &scope.version); - insert_attributes(&mut scope_log_json, &scope.attributes); - insert_number_if_some( - &mut scope_log_json, - "scope_dropped_attributes_count", - &scope.dropped_attributes_count.map(|f| f as f64), + scope_log_json.insert("scope_name".to_string(), Value::String(scope.name.clone())); + scope_log_json.insert( + "scope_version".to_string(), + Value::String(scope.version.clone()), ); - } - - if let Some(schema_url) = &scope_log.schema_url { + insert_attributes(&mut scope_log_json, &scope.attributes); scope_log_json.insert( - "scope_log_schema_url".to_string(), - Value::String(schema_url.clone()), + "scope_dropped_attributes_count".to_string(), + Value::Number(scope.dropped_attributes_count.into()), ); } + scope_log_json.insert( + "scope_log_schema_url".to_string(), + Value::String(scope_log.schema_url.clone()), + ); for log_record in &scope_log.log_records { let log_record_json = flatten_log_record(log_record); @@ -149,34 +130,31 @@ pub fn flatten_otel_logs(body: &Bytes) -> Vec> { let message: LogsData = serde_json::from_str(body_str).unwrap(); let mut vec_otel_json = Vec::new(); - if let Some(records) = &message.resource_logs { - for record in records { - let mut resource_log_json = BTreeMap::new(); - - if let Some(resource) = &record.resource { - insert_attributes(&mut resource_log_json, &resource.attributes); - insert_number_if_some( - &mut resource_log_json, - "resource_dropped_attributes_count", - &resource.dropped_attributes_count.map(|f| f as f64), - ); - } - - let mut vec_resource_logs_json = Vec::new(); - if let Some(scope_logs) = &record.scope_logs { - for scope_log in scope_logs { - vec_resource_logs_json.extend(flatten_scope_log(scope_log)); - } - } - - insert_if_some(&mut resource_log_json, "schema_url", &record.schema_url); - - for resource_logs_json in &mut vec_resource_logs_json { - resource_logs_json.extend(resource_log_json.clone()); - } - - vec_otel_json.extend(vec_resource_logs_json); + for record in &message.resource_logs { + let mut resource_log_json = 
BTreeMap::new(); + + if let Some(resource) = &record.resource { + insert_attributes(&mut resource_log_json, &resource.attributes); + resource_log_json.insert( + "resource_dropped_attributes_count".to_string(), + Value::Number(resource.dropped_attributes_count.into()), + ); } + + let mut vec_resource_logs_json = Vec::new(); + for scope_log in &record.scope_logs { + vec_resource_logs_json.extend(flatten_scope_log(scope_log)); + } + resource_log_json.insert( + "schema_url".to_string(), + Value::String(record.schema_url.clone()), + ); + + for resource_logs_json in &mut vec_resource_logs_json { + resource_logs_json.extend(resource_log_json.clone()); + } + + vec_otel_json.extend(vec_resource_logs_json); } vec_otel_json diff --git a/src/handlers/http/otel/metrics.rs b/src/handlers/http/otel/metrics.rs index a57abc92d..cdaee16ff 100644 --- a/src/handlers/http/otel/metrics.rs +++ b/src/handlers/http/otel/metrics.rs @@ -19,15 +19,14 @@ use std::collections::BTreeMap; use bytes::Bytes; +use opentelemetry_proto::tonic::metrics::v1::number_data_point::Value as NumberDataPointValue; +use opentelemetry_proto::tonic::metrics::v1::{ + exemplar::Value as ExemplarValue, exponential_histogram_data_point::Buckets, metric, Exemplar, + ExponentialHistogram, Gauge, Histogram, Metric, MetricsData, NumberDataPoint, Sum, Summary, +}; use serde_json::Value; -use super::{ - insert_attributes, insert_bool_if_some, insert_if_some, insert_number_if_some, - proto::metrics::v1::{ - exponential_histogram_data_point::Buckets, Exemplar, ExponentialHistogram, Gauge, - Histogram, Metric, MetricsData, NumberDataPoint, Sum, Summary, - }, -}; +use super::{insert_attributes, insert_number_if_some}; /// otel metrics event has json array for exemplar /// this function flatten the exemplar json array @@ -37,19 +36,34 @@ fn flatten_exemplar(exemplars: &[Exemplar]) -> BTreeMap { let mut exemplar_json = BTreeMap::new(); for exemplar in exemplars { insert_attributes(&mut exemplar_json, &exemplar.filtered_attributes); - insert_if_some( - &mut exemplar_json, - "exemplar_time_unix_nano", - &exemplar.time_unix_nano, + exemplar_json.insert( + "exemplar_time_unix_nano".to_string(), + Value::Number(exemplar.time_unix_nano.into()), ); - insert_if_some(&mut exemplar_json, "exemplar_span_id", &exemplar.span_id); - insert_if_some(&mut exemplar_json, "exemplar_trace_id", &exemplar.trace_id); - insert_number_if_some( - &mut exemplar_json, - "exemplar_as_double", - &exemplar.as_double, + exemplar_json.insert( + "exemplar_span_id".to_string(), + Value::String(hex::encode(&exemplar.span_id)), + ); + exemplar_json.insert( + "exemplar_trace_id".to_string(), + Value::String(hex::encode(&exemplar.trace_id)), ); - insert_if_some(&mut exemplar_json, "exemplar_as_int", &exemplar.as_int); + if let Some(value) = &exemplar.value { + match value { + ExemplarValue::AsDouble(double_val) => { + exemplar_json.insert( + "exemplar_value_as_double".to_string(), + Value::Number(serde_json::Number::from_f64(*double_val).unwrap()), + ); + } + ExemplarValue::AsInt(int_val) => { + exemplar_json.insert( + "exemplar_value_as_int".to_string(), + Value::Number(serde_json::Number::from(*int_val)), + ); + } + } + } } exemplar_json } @@ -64,30 +78,33 @@ fn flatten_number_data_points(data_points: &[NumberDataPoint]) -> Vec { + data_point_json.insert( + "data_point_value_as_double".to_string(), + Value::Number(serde_json::Number::from_f64(*double_val).unwrap()), + ); + } + NumberDataPointValue::AsInt(int_val) => { + data_point_json.insert( + "data_point_value_as_int".to_string(), + 
Value::Number(serde_json::Number::from(*int_val)), + ); + } } } data_point_json @@ -101,15 +118,13 @@ fn flatten_number_data_points(data_points: &[NumberDataPoint]) -> Vec Vec> { let mut vec_gauge_json = Vec::new(); - if let Some(data_points) = &gauge.data_points { - let data_points_json = flatten_number_data_points(data_points); - for data_point_json in data_points_json { - let mut gauge_json = BTreeMap::new(); - for (key, value) in &data_point_json { - gauge_json.insert(format!("gauge_{}", key), value.clone()); - } - vec_gauge_json.push(gauge_json); + let data_points_json = flatten_number_data_points(&gauge.data_points); + for data_point_json in data_points_json { + let mut gauge_json = BTreeMap::new(); + for (key, value) in &data_point_json { + gauge_json.insert(format!("gauge_{}", key), value.clone()); } + vec_gauge_json.push(gauge_json); } vec_gauge_json } @@ -120,24 +135,23 @@ fn flatten_gauge(gauge: &Gauge) -> Vec> { /// and returns a `Vec` of `BTreeMap` for each data point fn flatten_sum(sum: &Sum) -> Vec> { let mut vec_sum_json = Vec::new(); - if let Some(data_points) = &sum.data_points { - let data_points_json = flatten_number_data_points(data_points); - for data_point_json in data_points_json { - let mut sum_json = BTreeMap::new(); - for (key, value) in &data_point_json { - sum_json.insert(format!("sum_{}", key), value.clone()); - } - vec_sum_json.push(sum_json); - } + let data_points_json = flatten_number_data_points(&sum.data_points); + for data_point_json in data_points_json { let mut sum_json = BTreeMap::new(); - sum_json.extend(flatten_aggregation_temporality( - &sum.aggregation_temporality, - )); - insert_bool_if_some(&mut sum_json, "sum_is_monotonic", &sum.is_monotonic); - for data_point_json in &mut vec_sum_json { - for (key, value) in &sum_json { - data_point_json.insert(key.clone(), value.clone()); - } + for (key, value) in &data_point_json { + sum_json.insert(format!("sum_{}", key), value.clone()); + } + vec_sum_json.push(sum_json); + } + let mut sum_json = BTreeMap::new(); + sum_json.extend(flatten_aggregation_temporality(sum.aggregation_temporality)); + sum_json.insert( + "sum_is_monotonic".to_string(), + Value::Bool(sum.is_monotonic), + ); + for data_point_json in &mut vec_sum_json { + for (key, value) in &sum_json { + data_point_json.insert(key.clone(), value.clone()); } } vec_sum_json @@ -149,58 +163,50 @@ fn flatten_sum(sum: &Sum) -> Vec> { /// and returns a `Vec` of `BTreeMap` for each data point fn flatten_histogram(histogram: &Histogram) -> Vec> { let mut data_points_json = Vec::new(); - if let Some(histogram_data_points) = &histogram.data_points { - for data_point in histogram_data_points { - let mut data_point_json = BTreeMap::new(); - insert_attributes(&mut data_point_json, &data_point.attributes); - insert_if_some( - &mut data_point_json, - "histogram_start_time_unix_nano", - &data_point.start_time_unix_nano, - ); - insert_if_some( - &mut data_point_json, - "histogram_time_unix_nano", - &data_point.time_unix_nano, - ); - insert_if_some( - &mut data_point_json, - "histogram_data_point_count", - &data_point.count, + for data_point in &histogram.data_points { + let mut data_point_json = BTreeMap::new(); + insert_attributes(&mut data_point_json, &data_point.attributes); + data_point_json.insert( + "histogram_start_time_unix_nano".to_string(), + Value::Number(data_point.start_time_unix_nano.into()), + ); + data_point_json.insert( + "histogram_time_unix_nano".to_string(), + Value::Number(data_point.time_unix_nano.into()), + ); + data_point_json.insert( + 
"histogram_data_point_count".to_string(), + Value::Number(data_point.count.into()), + ); + insert_number_if_some( + &mut data_point_json, + "histogram_data_point_sum", + &data_point.sum, + ); + for (index, bucket_count) in data_point.bucket_counts.iter().enumerate() { + data_point_json.insert( + format!("histogram_data_point_bucket_count_{}", index + 1), + Value::String(bucket_count.to_string()), ); - insert_number_if_some( - &mut data_point_json, - "histogram_data_point_sum", - &data_point.sum, + } + for (index, explicit_bound) in data_point.explicit_bounds.iter().enumerate() { + data_point_json.insert( + format!("histogram_data_point_explicit_bound_{}", index + 1), + Value::String(explicit_bound.to_string()), ); - if let Some(bucket_counts) = &data_point.bucket_counts { - for (index, bucket_count) in bucket_counts.iter().enumerate() { - data_point_json.insert( - format!("histogram_data_point_bucket_count_{}", index + 1), - Value::String(bucket_count.to_string()), - ); - } - } - if let Some(explicit_bounds) = &data_point.explicit_bounds { - for (index, explicit_bound) in explicit_bounds.iter().enumerate() { - data_point_json.insert( - format!("histogram_data_point_explicit_bound_{}", index + 1), - Value::String(explicit_bound.to_string()), - ); - } - } - if let Some(exemplars) = &data_point.exemplars { - let exemplar_json = flatten_exemplar(exemplars); - for (key, value) in exemplar_json { - data_point_json.insert(format!("histogram_{}", key), value); - } - } - data_points_json.push(data_point_json); } + let exemplar_json = flatten_exemplar(&data_point.exemplars); + for (key, value) in exemplar_json { + data_point_json.insert(format!("histogram_{}", key), value); + } + data_point_json.extend(flatten_data_point_flags(data_point.flags)); + insert_number_if_some(&mut data_point_json, "histogram_min", &data_point.min); + insert_number_if_some(&mut data_point_json, "histogram_max", &data_point.max); + data_points_json.push(data_point_json); } let mut histogram_json = BTreeMap::new(); histogram_json.extend(flatten_aggregation_temporality( - &histogram.aggregation_temporality, + histogram.aggregation_temporality, )); for data_point_json in &mut data_points_json { for (key, value) in &histogram_json { @@ -215,14 +221,13 @@ fn flatten_histogram(histogram: &Histogram) -> Vec> { /// and returns a `BTreeMap` of the flattened json fn flatten_buckets(bucket: &Buckets) -> BTreeMap { let mut bucket_json = BTreeMap::new(); - insert_number_if_some(&mut bucket_json, "offset", &bucket.offset.map(|v| v as f64)); - if let Some(bucket_counts) = &bucket.bucket_counts { - for (index, bucket_count) in bucket_counts.iter().enumerate() { - bucket_json.insert( - format!("bucket_count_{}", index + 1), - Value::String(bucket_count.to_string()), - ); - } + bucket_json.insert("offset".to_string(), Value::Number(bucket.offset.into())); + + for (index, bucket_count) in bucket.bucket_counts.iter().enumerate() { + bucket_json.insert( + format!("bucket_count_{}", index + 1), + Value::String(bucket_count.to_string()), + ); } bucket_json } @@ -233,66 +238,55 @@ fn flatten_buckets(bucket: &Buckets) -> BTreeMap { /// and returns a `Vec` of `BTreeMap` for each data point fn flatten_exp_histogram(exp_histogram: &ExponentialHistogram) -> Vec> { let mut data_points_json = Vec::new(); - if let Some(exp_histogram_data_points) = &exp_histogram.data_points { - for data_point in exp_histogram_data_points { - let mut data_point_json = BTreeMap::new(); - insert_attributes(&mut data_point_json, &data_point.attributes); - insert_if_some( - 
&mut data_point_json, - "exponential_histogram_start_time_unix_nano", - &data_point.start_time_unix_nano, - ); - insert_if_some( - &mut data_point_json, - "exponential_histogram_time_unix_nano", - &data_point.time_unix_nano, - ); - insert_if_some( - &mut data_point_json, - "exponential_histogram_data_point_count", - &data_point.count, - ); - insert_number_if_some( - &mut data_point_json, - "exponential_histogram_data_point_sum", - &data_point.sum, - ); - insert_number_if_some( - &mut data_point_json, - "exponential_histogram_data_point_scale", - &data_point.scale.map(|v| v as f64), - ); - insert_number_if_some( - &mut data_point_json, - "exponential_histogram_data_point_zero_count", - &data_point.zero_count.map(|v| v as f64), - ); - if let Some(positive) = &data_point.positive { - let positive_json = flatten_buckets(positive); - for (key, value) in positive_json { - data_point_json - .insert(format!("exponential_histogram_positive_{}", key), value); - } - } - if let Some(negative) = &data_point.negative { - let negative_json = flatten_buckets(negative); - for (key, value) in negative_json { - data_point_json - .insert(format!("exponential_histogram_negative_{}", key), value); - } + for data_point in &exp_histogram.data_points { + let mut data_point_json = BTreeMap::new(); + insert_attributes(&mut data_point_json, &data_point.attributes); + data_point_json.insert( + "exponential_histogram_start_time_unix_nano".to_string(), + Value::Number(data_point.start_time_unix_nano.into()), + ); + data_point_json.insert( + "exponential_histogram_time_unix_nano".to_string(), + Value::Number(data_point.time_unix_nano.into()), + ); + data_point_json.insert( + "exponential_histogram_data_point_count".to_string(), + Value::Number(data_point.count.into()), + ); + insert_number_if_some( + &mut data_point_json, + "exponential_histogram_data_point_sum", + &data_point.sum, + ); + data_point_json.insert( + "exponential_histogram_data_point_scale".to_string(), + Value::Number(data_point.scale.into()), + ); + data_point_json.insert( + "exponential_histogram_data_point_zero_count".to_string(), + Value::Number(data_point.zero_count.into()), + ); + if let Some(positive) = &data_point.positive { + let positive_json = flatten_buckets(positive); + for (key, value) in positive_json { + data_point_json.insert(format!("exponential_histogram_positive_{}", key), value); } - if let Some(exemplars) = &data_point.exemplars { - let exemplar_json = flatten_exemplar(exemplars); - for (key, value) in exemplar_json { - data_point_json.insert(format!("exponential_histogram_{}", key), value); - } + } + if let Some(negative) = &data_point.negative { + let negative_json = flatten_buckets(negative); + for (key, value) in negative_json { + data_point_json.insert(format!("exponential_histogram_negative_{}", key), value); } - data_points_json.push(data_point_json); } + let exemplar_json = flatten_exemplar(&data_point.exemplars); + for (key, value) in exemplar_json { + data_point_json.insert(format!("exponential_histogram_{}", key), value); + } + data_points_json.push(data_point_json); } let mut exp_histogram_json = BTreeMap::new(); exp_histogram_json.extend(flatten_aggregation_temporality( - &exp_histogram.aggregation_temporality, + exp_histogram.aggregation_temporality, )); for data_point_json in &mut data_points_json { for (key, value) in &exp_histogram_json { @@ -308,46 +302,36 @@ fn flatten_exp_histogram(exp_histogram: &ExponentialHistogram) -> Vec Vec> { let mut data_points_json = Vec::new(); - if let Some(summary_data_points) = 
&summary.data_points { - for data_point in summary_data_points { - let mut data_point_json = BTreeMap::new(); - insert_attributes(&mut data_point_json, &data_point.attributes); - insert_if_some( - &mut data_point_json, - "summary_start_time_unix_nano", - &data_point.start_time_unix_nano, - ); - insert_if_some( - &mut data_point_json, - "summary_time_unix_nano", - &data_point.time_unix_nano, - ); - insert_if_some( - &mut data_point_json, - "summary_data_point_count", - &data_point.count, + for data_point in &summary.data_points { + let mut data_point_json = BTreeMap::new(); + insert_attributes(&mut data_point_json, &data_point.attributes); + data_point_json.insert( + "summary_start_time_unix_nano".to_string(), + Value::Number(data_point.start_time_unix_nano.into()), + ); + data_point_json.insert( + "summary_time_unix_nano".to_string(), + Value::Number(data_point.time_unix_nano.into()), + ); + data_point_json.insert( + "summary_data_point_count".to_string(), + Value::Number(data_point.count.into()), + ); + data_point_json.insert( + "summary_data_point_sum".to_string(), + Value::Number(serde_json::Number::from_f64(data_point.sum).unwrap()), + ); + for (index, quantile_value) in data_point.quantile_values.iter().enumerate() { + data_point_json.insert( + format!("summary_quantile_value_quantile_{}", index + 1), + Value::Number(serde_json::Number::from_f64(quantile_value.quantile).unwrap()), ); - insert_number_if_some( - &mut data_point_json, - "summary_data_point_sum", - &data_point.sum, + data_point_json.insert( + format!("summary_quantile_value_value_{}", index + 1), + Value::Number(serde_json::Number::from_f64(quantile_value.value).unwrap()), ); - if let Some(quantile_values) = &data_point.quantile_values { - for (index, quantile_value) in quantile_values.iter().enumerate() { - insert_number_if_some( - &mut data_point_json, - &format!("summary_quantile_value_quantile_{}", index + 1), - &quantile_value.quantile, - ); - insert_if_some( - &mut data_point_json, - &format!("summary_quantile_value_value_{}", index + 1), - &quantile_value.value, - ); - } - } - data_points_json.push(data_point_json); } + data_points_json.push(data_point_json); } data_points_json } @@ -360,28 +344,37 @@ fn flatten_summary(summary: &Summary) -> Vec> { pub fn flatten_metrics_record(metrics_record: &Metric) -> Vec> { let mut data_points_json = Vec::new(); let mut metric_json = BTreeMap::new(); - if let Some(gauge) = &metrics_record.gauge { - data_points_json.extend(flatten_gauge(gauge)); - } - if let Some(sum) = &metrics_record.sum { - data_points_json.extend(flatten_sum(sum)); - } - if let Some(histogram) = &metrics_record.histogram { - data_points_json.extend(flatten_histogram(histogram)); - } - if let Some(exp_histogram) = &metrics_record.exponential_histogram { - data_points_json.extend(flatten_exp_histogram(exp_histogram)); - } - if let Some(summary) = &metrics_record.summary { - data_points_json.extend(flatten_summary(summary)); + + match &metrics_record.data { + Some(metric::Data::Gauge(gauge)) => { + data_points_json.extend(flatten_gauge(gauge)); + } + Some(metric::Data::Sum(sum)) => { + data_points_json.extend(flatten_sum(sum)); + } + Some(metric::Data::Histogram(histogram)) => { + data_points_json.extend(flatten_histogram(histogram)); + } + Some(metric::Data::ExponentialHistogram(exp_histogram)) => { + data_points_json.extend(flatten_exp_histogram(exp_histogram)); + } + Some(metric::Data::Summary(summary)) => { + data_points_json.extend(flatten_summary(summary)); + } + None => {} } - insert_if_some(&mut 
metric_json, "metric_name", &metrics_record.name); - insert_if_some( - &mut metric_json, - "metric_description", - &metrics_record.description, + metric_json.insert( + "metric_name".to_string(), + Value::String(metrics_record.name.clone()), + ); + metric_json.insert( + "metric_description".to_string(), + Value::String(metrics_record.description.clone()), + ); + metric_json.insert( + "metric_unit".to_string(), + Value::String(metrics_record.unit.clone()), ); - insert_if_some(&mut metric_json, "metric_unit", &metrics_record.unit); insert_attributes(&mut metric_json, &metrics_record.metadata); for data_point_json in &mut data_points_json { for (key, value) in &metric_json { @@ -397,61 +390,55 @@ pub fn flatten_otel_metrics(body: &Bytes) -> Vec> { let body_str = std::str::from_utf8(body).unwrap(); let message: MetricsData = serde_json::from_str(body_str).unwrap(); let mut vec_otel_json = Vec::new(); - if let Some(records) = &message.resource_metrics { - for record in records { - let mut resource_metrics_json = BTreeMap::new(); - if let Some(resource) = &record.resource { - insert_attributes(&mut resource_metrics_json, &resource.attributes); - insert_number_if_some( - &mut resource_metrics_json, - "resource_dropped_attributes_count", - &resource.dropped_attributes_count.map(|f| f as f64), - ); + for record in &message.resource_metrics { + let mut resource_metrics_json = BTreeMap::new(); + if let Some(resource) = &record.resource { + insert_attributes(&mut resource_metrics_json, &resource.attributes); + resource_metrics_json.insert( + "resource_dropped_attributes_count".to_string(), + Value::Number(resource.dropped_attributes_count.into()), + ); + } + let mut vec_scope_metrics_json = Vec::new(); + for scope_metric in &record.scope_metrics { + let mut scope_metrics_json = BTreeMap::new(); + for metrics_record in &scope_metric.metrics { + vec_scope_metrics_json.extend(flatten_metrics_record(metrics_record)); } - let mut vec_scope_metrics_json = Vec::new(); - if let Some(scope_metrics) = &record.scope_metrics { - for scope_metric in scope_metrics { - let mut scope_metrics_json = BTreeMap::new(); - for metrics_record in &scope_metric.metrics { - vec_scope_metrics_json.extend(flatten_metrics_record(metrics_record)); - } - if let Some(scope) = &scope_metric.scope { - insert_if_some(&mut scope_metrics_json, "scope_name", &scope.name); - insert_if_some(&mut scope_metrics_json, "scope_version", &scope.version); - insert_attributes(&mut scope_metrics_json, &scope.attributes); - insert_number_if_some( - &mut scope_metrics_json, - "scope_dropped_attributes_count", - &scope.dropped_attributes_count.map(|f| f as f64), - ); - for scope_metric_json in &mut vec_scope_metrics_json { - for (key, value) in &scope_metrics_json { - scope_metric_json.insert(key.clone(), value.clone()); - } - } - } - if let Some(schema_url) = &scope_metric.schema_url { - for scope_metrics_json in &mut vec_scope_metrics_json { - scope_metrics_json.insert( - "scope_metrics_schema_url".to_string(), - Value::String(schema_url.clone()), - ); - } - } - } + if let Some(scope) = &scope_metric.scope { + scope_metrics_json + .insert("scope_name".to_string(), Value::String(scope.name.clone())); + scope_metrics_json.insert( + "scope_version".to_string(), + Value::String(scope.version.clone()), + ); + insert_attributes(&mut scope_metrics_json, &scope.attributes); + scope_metrics_json.insert( + "scope_dropped_attributes_count".to_string(), + Value::Number(scope.dropped_attributes_count.into()), + ); } - insert_if_some( - &mut resource_metrics_json, 
- "resource_metrics_schema_url", - &record.schema_url, + scope_metrics_json.insert( + "scope_metrics_schema_url".to_string(), + Value::String(scope_metric.schema_url.clone()), ); - for resource_metric_json in &mut vec_scope_metrics_json { - for (key, value) in &resource_metrics_json { - resource_metric_json.insert(key.clone(), value.clone()); + + for scope_metric_json in &mut vec_scope_metrics_json { + for (key, value) in &scope_metrics_json { + scope_metric_json.insert(key.clone(), value.clone()); } } - vec_otel_json.extend(vec_scope_metrics_json); } + resource_metrics_json.insert( + "resource_metrics_schema_url".to_string(), + Value::String(record.schema_url.clone()), + ); + for resource_metric_json in &mut vec_scope_metrics_json { + for (key, value) in &resource_metrics_json { + resource_metric_json.insert(key.clone(), value.clone()); + } + } + vec_otel_json.extend(vec_scope_metrics_json); } vec_otel_json } @@ -460,28 +447,37 @@ pub fn flatten_otel_metrics(body: &Bytes) -> Vec> { /// there is a mapping of aggregation temporality to its description provided in proto /// this function fetches the description from the aggregation temporality /// and adds it to the flattened json -fn flatten_aggregation_temporality( - aggregation_temporality: &Option, -) -> BTreeMap { +fn flatten_aggregation_temporality(aggregation_temporality: i32) -> BTreeMap { let mut aggregation_temporality_json = BTreeMap::new(); - insert_number_if_some( - &mut aggregation_temporality_json, - "aggregation_temporality", - &aggregation_temporality.map(|f| f as f64), + aggregation_temporality_json.insert( + "aggregation_temporality".to_string(), + Value::Number(aggregation_temporality.into()), + ); + let description = match aggregation_temporality { + 0 => "AGGREGATION_TEMPORALITY_UNSPECIFIED", + 1 => "AGGREGATION_TEMPORALITY_DELTA", + 2 => "AGGREGATION_TEMPORALITY_CUMULATIVE", + _ => "", + }; + aggregation_temporality_json.insert( + "aggregation_temporality_description".to_string(), + Value::String(description.to_string()), ); - - if let Some(aggregation_temporality) = aggregation_temporality { - let description = match aggregation_temporality { - 0 => "AGGREGATION_TEMPORALITY_UNSPECIFIED", - 1 => "AGGREGATION_TEMPORALITY_DELTA", - 2 => "AGGREGATION_TEMPORALITY_CUMULATIVE", - _ => "", - }; - aggregation_temporality_json.insert( - "aggregation_temporality_description".to_string(), - Value::String(description.to_string()), - ); - } aggregation_temporality_json } + +fn flatten_data_point_flags(flags: u32) -> BTreeMap { + let mut data_point_flags_json = BTreeMap::new(); + data_point_flags_json.insert("data_point_flags".to_string(), Value::Number(flags.into())); + let description = match flags { + 0 => "DATA_POINT_FLAGS_DO_NOT_USE", + 1 => "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK", + _ => "", + }; + data_point_flags_json.insert( + "data_point_flags_description".to_string(), + Value::String(description.to_string()), + ); + data_point_flags_json +} diff --git a/src/handlers/http/otel/opentelemetry/proto/README.md b/src/handlers/http/otel/opentelemetry/proto/README.md deleted file mode 100644 index d0281330e..000000000 --- a/src/handlers/http/otel/opentelemetry/proto/README.md +++ /dev/null @@ -1,2 +0,0 @@ -The following protobuf definitions are vendored from: -https://github.com/open-telemetry/opentelemetry-proto/tree/v1.0.0/opentelemetry/proto diff --git a/src/handlers/http/otel/opentelemetry/proto/common/v1/common.proto b/src/handlers/http/otel/opentelemetry/proto/common/v1/common.proto deleted file mode 100644 index 
59e348480..000000000 --- a/src/handlers/http/otel/opentelemetry/proto/common/v1/common.proto +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.common.v1; - -option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.common.v1"; -option java_outer_classname = "CommonProto"; -option go_package = "go.opentelemetry.io/proto/otlp/common/v1"; - -// AnyValue is used to represent any type of attribute value. AnyValue may contain a -// primitive value such as a string or integer or it may contain an arbitrary nested -// object containing arrays, key-value lists and primitives. -message AnyValue { - // The value is one of the listed fields. It is valid for all values to be unspecified - // in which case this AnyValue is considered to be "empty". - oneof value { - string string_value = 1; - bool bool_value = 2; - int64 int_value = 3; - double double_value = 4; - ArrayValue array_value = 5; - KeyValueList kvlist_value = 6; - bytes bytes_value = 7; - } -} - -// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message -// since oneof in AnyValue does not allow repeated fields. -message ArrayValue { - // Array of values. The array may be empty (contain 0 elements). - repeated AnyValue values = 1; -} - -// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message -// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need -// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to -// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches -// are semantically equivalent. -message KeyValueList { - // A collection of key/value pairs of key-value pairs. The list may be empty (may - // contain 0 elements). - // The keys MUST be unique (it is not allowed to have more than one - // value with the same key). - repeated KeyValue values = 1; -} - -// KeyValue is a key-value pair that is used to store Span attributes, Link -// attributes, etc. -message KeyValue { - string key = 1; - AnyValue value = 2; -} - -// InstrumentationScope is a message representing the instrumentation scope information -// such as the fully qualified name and version. -message InstrumentationScope { - // An empty instrumentation scope name means the name is unknown. - string name = 1; - string version = 2; - - // Additional attributes that describe the scope. [Optional]. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). 
- repeated KeyValue attributes = 3; - uint32 dropped_attributes_count = 4; -} diff --git a/src/handlers/http/otel/opentelemetry/proto/logs/v1/logs.proto b/src/handlers/http/otel/opentelemetry/proto/logs/v1/logs.proto deleted file mode 100644 index 0b4b64972..000000000 --- a/src/handlers/http/otel/opentelemetry/proto/logs/v1/logs.proto +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.logs.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Logs.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.logs.v1"; -option java_outer_classname = "LogsProto"; -option go_package = "go.opentelemetry.io/proto/otlp/logs/v1"; - -// LogsData represents the logs data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP logs data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message LogsData { - // An array of ResourceLogs. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceLogs resource_logs = 1; -} - -// A collection of ScopeLogs from a Resource. -message ResourceLogs { - reserved 1000; - - // The resource for the logs in this message. - // If this field is not set then resource info is unknown. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeLogs that originate from a resource. - repeated ScopeLogs scope_logs = 2; - - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_logs" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Logs produced by a Scope. -message ScopeLogs { - // The instrumentation scope information for the logs in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of log records. - repeated LogRecord log_records = 2; - - // This schema_url applies to all logs in the "logs" field. - string schema_url = 3; -} - -// Possible values for LogRecord.SeverityNumber. -enum SeverityNumber { - // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. 
- SEVERITY_NUMBER_UNSPECIFIED = 0; - SEVERITY_NUMBER_TRACE = 1; - SEVERITY_NUMBER_TRACE2 = 2; - SEVERITY_NUMBER_TRACE3 = 3; - SEVERITY_NUMBER_TRACE4 = 4; - SEVERITY_NUMBER_DEBUG = 5; - SEVERITY_NUMBER_DEBUG2 = 6; - SEVERITY_NUMBER_DEBUG3 = 7; - SEVERITY_NUMBER_DEBUG4 = 8; - SEVERITY_NUMBER_INFO = 9; - SEVERITY_NUMBER_INFO2 = 10; - SEVERITY_NUMBER_INFO3 = 11; - SEVERITY_NUMBER_INFO4 = 12; - SEVERITY_NUMBER_WARN = 13; - SEVERITY_NUMBER_WARN2 = 14; - SEVERITY_NUMBER_WARN3 = 15; - SEVERITY_NUMBER_WARN4 = 16; - SEVERITY_NUMBER_ERROR = 17; - SEVERITY_NUMBER_ERROR2 = 18; - SEVERITY_NUMBER_ERROR3 = 19; - SEVERITY_NUMBER_ERROR4 = 20; - SEVERITY_NUMBER_FATAL = 21; - SEVERITY_NUMBER_FATAL2 = 22; - SEVERITY_NUMBER_FATAL3 = 23; - SEVERITY_NUMBER_FATAL4 = 24; -} - -// LogRecordFlags is defined as a protobuf 'uint32' type and is to be used as -// bit-fields. Each non-zero value defined in this enum is a bit-mask. -// To extract the bit-field, for example, use an expression like: -// -// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) -// -enum LogRecordFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - LOG_RECORD_FLAGS_DO_NOT_USE = 0; - - // Bits 0-7 are used for trace flags. - LOG_RECORD_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; - - // Bits 8-31 are reserved for future use. -} - -// A log record according to OpenTelemetry Log Data Model: -// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md -message LogRecord { - reserved 4; - - // time_unix_nano is the time when the event occurred. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - fixed64 time_unix_nano = 1; - - // Time when the event was observed by the collection system. - // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) - // this timestamp is typically set at the generation time and is equal to Timestamp. - // For events originating externally and collected by OpenTelemetry (e.g. using - // Collector) this is the time when OpenTelemetry's code observed the event measured - // by the clock of the OpenTelemetry code. This field MUST be set once the event is - // observed by OpenTelemetry. - // - // For converting OpenTelemetry log data to formats that support only one timestamp or - // when receiving OpenTelemetry log data by recipients that support only one timestamp - // internally the following logic is recommended: - // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // Value of 0 indicates unknown or missing timestamp. - fixed64 observed_time_unix_nano = 11; - - // Numerical value of the severity, normalized to values described in Log Data Model. - // [Optional]. - SeverityNumber severity_number = 2; - - // The severity text (also known as log level). The original string representation as - // it is known at the source. [Optional]. - string severity_text = 3; - - // A value containing the body of the log record. Can be for example a human-readable - // string message (including multi-line) describing the event in a free form or it can - // be a structured data composed of arrays and maps of other values. [Optional]. - opentelemetry.proto.common.v1.AnyValue body = 5; - - // Additional attributes that describe the specific event occurrence. [Optional]. 
- // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 6; - uint32 dropped_attributes_count = 7; - - // Flags, a bit field. 8 least significant bits are the trace flags as - // defined in W3C Trace Context specification. 24 most significant bits are reserved - // and must be set to 0. Readers must not assume that 24 most significant bits - // will be zero and must correctly mask the bits when reading 8-bit trace flag (use - // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. - fixed32 flags = 8; - - // A unique identifier for a trace. All logs from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. - // - // The receivers SHOULD assume that the log record is not associated with a - // trace if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. - bytes trace_id = 9; - - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is optional. If the sender specifies a valid span_id then it SHOULD also - // specify a valid trace_id. - // - // The receivers SHOULD assume that the log record is not associated with a - // span if any of the following is true: - // - the field is not present, - // - the field contains an invalid value. - bytes span_id = 10; -} diff --git a/src/handlers/http/otel/opentelemetry/proto/metrics/v1/metrics.proto b/src/handlers/http/otel/opentelemetry/proto/metrics/v1/metrics.proto deleted file mode 100644 index 00c5112ce..000000000 --- a/src/handlers/http/otel/opentelemetry/proto/metrics/v1/metrics.proto +++ /dev/null @@ -1,714 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.metrics.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.metrics.v1"; -option java_outer_classname = "MetricsProto"; -option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1"; - -// MetricsData represents the metrics data that can be stored in a persistent -// storage, OR can be embedded by other protocols that transfer OTLP metrics -// data but do not implement the OTLP protocol. 
-// -// MetricsData -// └─── ResourceMetrics -// ├── Resource -// ├── SchemaURL -// └── ScopeMetrics -// ├── Scope -// ├── SchemaURL -// └── Metric -// ├── Name -// ├── Description -// ├── Unit -// └── data -// ├── Gauge -// ├── Sum -// ├── Histogram -// ├── ExponentialHistogram -// └── Summary -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message MetricsData { - // An array of ResourceMetrics. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceMetrics resource_metrics = 1; -} - -// A collection of ScopeMetrics from a Resource. -message ResourceMetrics { - reserved 1000; - - // The resource for the metrics in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of metrics that originate from a resource. - repeated ScopeMetrics scope_metrics = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_metrics" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Metrics produced by an Scope. -message ScopeMetrics { - // The instrumentation scope information for the metrics in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of metrics that originate from an instrumentation library. - repeated Metric metrics = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all metrics in the "metrics" field. - string schema_url = 3; -} - -// Defines a Metric which has one or more timeseries. The following is a -// brief summary of the Metric data model. For more details, see: -// -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md -// -// The data model and relation between entities is shown in the -// diagram below. Here, "DataPoint" is the term used to refer to any -// one of the specific data point value types, and "points" is the term used -// to refer to any one of the lists of points contained in the Metric. -// -// - Metric is composed of a metadata and data. -// - Metadata part contains a name, description, unit. -// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). -// - DataPoint contains timestamps, attributes, and one of the possible value type -// fields. 
-// -// Metric -// +------------+ -// |name | -// |description | -// |unit | +------------------------------------+ -// |data |---> |Gauge, Sum, Histogram, Summary, ... | -// +------------+ +------------------------------------+ -// -// Data [One of Gauge, Sum, Histogram, Summary, ...] -// +-----------+ -// |... | // Metadata about the Data. -// |points |--+ -// +-----------+ | -// | +---------------------------+ -// | |DataPoint 1 | -// v |+------+------+ +------+ | -// +-----+ ||label |label |...|label | | -// | 1 |-->||value1|value2|...|valueN| | -// +-----+ |+------+------+ +------+ | -// | . | |+-----+ | -// | . | ||value| | -// | . | |+-----+ | -// | . | +---------------------------+ -// | . | . -// | . | . -// | . | . -// | . | +---------------------------+ -// | . | |DataPoint M | -// +-----+ |+------+------+ +------+ | -// | M |-->||label |label |...|label | | -// +-----+ ||value1|value2|...|valueN| | -// |+------+------+ +------+ | -// |+-----+ | -// ||value| | -// |+-----+ | -// +---------------------------+ -// -// Each distinct type of DataPoint represents the output of a specific -// aggregation function, the result of applying the DataPoint's -// associated function of to one or more measurements. -// -// All DataPoint types have three common fields: -// - Attributes includes key-value pairs associated with the data point -// - TimeUnixNano is required, set to the end time of the aggregation -// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints -// having an AggregationTemporality field, as discussed below. -// -// Both TimeUnixNano and StartTimeUnixNano values are expressed as -// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. -// -// # TimeUnixNano -// -// This field is required, having consistent interpretation across -// DataPoint types. TimeUnixNano is the moment corresponding to when -// the data point's aggregate value was captured. -// -// Data points with the 0 value for TimeUnixNano SHOULD be rejected -// by consumers. -// -// # StartTimeUnixNano -// -// StartTimeUnixNano in general allows detecting when a sequence of -// observations is unbroken. This field indicates to consumers the -// start time for points with cumulative and delta -// AggregationTemporality, and it should be included whenever possible -// to support correct rate calculation. Although it may be omitted -// when the start time is truly unknown, setting StartTimeUnixNano is -// strongly encouraged. -message Metric { - reserved 4, 6, 8; - - // name of the metric. - string name = 1; - - // description of the metric, which can be used in documentation. - string description = 2; - - // unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. - string unit = 3; - - // Data determines the aggregation type (if any) of the metric, what is the - // reported value type for the data points, as well as the relatationship to - // the time interval over which they are reported. - oneof data { - Gauge gauge = 5; - Sum sum = 7; - Histogram histogram = 9; - ExponentialHistogram exponential_histogram = 10; - Summary summary = 11; - } - - // Additional metadata attributes that describe the metric. [Optional]. - // Attributes are non-identifying. - // Consumers SHOULD NOT need to be aware of these attributes. - // These attributes MAY be used to encode information allowing - // for lossless roundtrip translation to / from another data model. 
- // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue metadata = 12; -} - -// Gauge represents the type of a scalar metric that always exports the -// "current value" for every data point. It should be used for an "unknown" -// aggregation. -// -// A Gauge does not support different aggregation temporalities. Given the -// aggregation is unknown, points cannot be combined using the same -// aggregation, regardless of aggregation temporalities. Therefore, -// AggregationTemporality is not included. Consequently, this also means -// "StartTimeUnixNano" is ignored for all data points. -message Gauge { - repeated NumberDataPoint data_points = 1; -} - -// Sum represents the type of a scalar metric that is calculated as a sum of all -// reported measurements over a time interval. -message Sum { - repeated NumberDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; - - // If "true" means that the sum is monotonic. - bool is_monotonic = 3; -} - -// Histogram represents the type of a metric that is calculated by aggregating -// as a Histogram of all reported measurements over a time interval. -message Histogram { - repeated HistogramDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; -} - -// ExponentialHistogram represents the type of a metric that is calculated by aggregating -// as a ExponentialHistogram of all reported double measurements over a time interval. -message ExponentialHistogram { - repeated ExponentialHistogramDataPoint data_points = 1; - - // aggregation_temporality describes if the aggregator reports delta changes - // since last report time, or cumulative changes since a fixed start time. - AggregationTemporality aggregation_temporality = 2; -} - -// Summary metric data are used to convey quantile summaries, -// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) -// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) -// data type. These data points cannot always be merged in a meaningful way. -// While they can be useful in some applications, histogram data points are -// recommended for new applications. -// Summary metrics do not have an aggregation temporality field. This is -// because the count and sum fields of a SummaryDataPoint are assumed to be -// cumulative values. -message Summary { - repeated SummaryDataPoint data_points = 1; -} - -// AggregationTemporality defines how a metric aggregator reports aggregated -// values. It describes how those values relate to the time interval over -// which they are aggregated. -enum AggregationTemporality { - // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. - AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; - - // DELTA is an AggregationTemporality for a metric aggregator which reports - // changes since last report time. Successive metrics contain aggregation of - // values from continuous and non-overlapping intervals. 
- // - // The values for a DELTA metric are based only on the time interval - // associated with one measurement cycle. There is no dependency on - // previous measurements like is the case for CUMULATIVE metrics. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // DELTA metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0+1 to - // t_0+2 with a value of 2. - AGGREGATION_TEMPORALITY_DELTA = 1; - - // CUMULATIVE is an AggregationTemporality for a metric aggregator which - // reports changes since a fixed start time. This means that current values - // of a CUMULATIVE metric depend on all previous measurements since the - // start time. Because of this, the sender is required to retain this state - // in some form. If this state is lost or invalidated, the CUMULATIVE metric - // values MUST be reset and a new fixed start time following the last - // reported measurement time sent MUST be used. - // - // For example, consider a system measuring the number of requests that - // it receives and reports the sum of these requests every second as a - // CUMULATIVE metric: - // - // 1. The system starts receiving at time=t_0. - // 2. A request is received, the system measures 1 request. - // 3. A request is received, the system measures 1 request. - // 4. A request is received, the system measures 1 request. - // 5. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+1 with a value of 3. - // 6. A request is received, the system measures 1 request. - // 7. A request is received, the system measures 1 request. - // 8. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_0 to - // t_0+2 with a value of 5. - // 9. The system experiences a fault and loses state. - // 10. The system recovers and resumes receiving at time=t_1. - // 11. A request is received, the system measures 1 request. - // 12. The 1 second collection cycle ends. A metric is exported for the - // number of requests received over the interval of time t_1 to - // t_0+1 with a value of 1. - // - // Note: Even though, when reporting changes since last report time, using - // CUMULATIVE is valid, it is not recommended. This may cause problems for - // systems that do not use start_time to determine when the aggregation - // value was reset (e.g. Prometheus). - AGGREGATION_TEMPORALITY_CUMULATIVE = 2; -} - -// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a -// bit-field representing 32 distinct boolean flags. Each flag defined in this -// enum is a bit-mask. 
To test the presence of a single flag in the flags of -// a data point, for example, use an expression like: -// -// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK -// -enum DataPointFlags { - // The zero value for the enum. Should not be used for comparisons. - // Instead use bitwise "and" with the appropriate mask as shown above. - DATA_POINT_FLAGS_DO_NOT_USE = 0; - - // This DataPoint is valid but has no recorded value. This value - // SHOULD be used to reflect explicitly missing data in a series, as - // for an equivalent to the Prometheus "staleness marker". - DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1; - - // Bits 2-31 are reserved for future use. -} - -// NumberDataPoint is a single data point in a timeseries that describes the -// time-varying scalar value of a metric. -message NumberDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // The value itself. A point is considered invalid when one of the recognized - // value fields is not present inside this oneof. - oneof value { - double as_double = 4; - sfixed64 as_int = 6; - } - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 5; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 8; -} - -// HistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Histogram. A Histogram contains summary statistics -// for a population of values, it may optionally contain the distribution of -// those values across a set of buckets. -// -// If the histogram contains the distribution of values, then both -// "explicit_bounds" and "bucket counts" fields must be defined. -// If the histogram does not contain the distribution of values, then both -// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and -// "sum" are known. -message HistogramDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. 
- fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be non-negative. This - // value must be equal to the sum of the "count" fields in buckets if a - // histogram is provided. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram - optional double sum = 5; - - // bucket_counts is an optional field contains the count values of histogram - // for each bucket. - // - // The sum of the bucket_counts must equal the value in the count field. - // - // The number of elements in bucket_counts array must be by one greater than - // the number of elements in explicit_bounds array. - repeated fixed64 bucket_counts = 6; - - // explicit_bounds specifies buckets with explicitly defined bounds for values. - // - // The boundaries for bucket at index i are: - // - // (-infinity, explicit_bounds[i]] for i == 0 - // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) - // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) - // - // The values in the explicit_bounds array must be strictly increasing. - // - // Histogram buckets are inclusive of their upper boundary, except the last - // bucket where the boundary is at infinity. This format is intentionally - // compatible with the OpenMetrics histogram definition. - repeated double explicit_bounds = 7; - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 8; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 10; - - // min is the minimum value over (start_time, end_time]. - optional double min = 11; - - // max is the maximum value over (start_time, end_time]. - optional double max = 12; -} - -// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the -// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains -// summary statistics for a population of values, it may optionally contain the -// distribution of those values across a set of buckets. -// -message ExponentialHistogramDataPoint { - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be - // non-negative. 
This value must be equal to the sum of the "bucket_counts" - // values in the positive and negative Buckets plus the "zero_count" field. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram - optional double sum = 5; - - // scale describes the resolution of the histogram. Boundaries are - // located at powers of the base, where: - // - // base = (2^(2^-scale)) - // - // The histogram bucket identified by `index`, a signed integer, - // contains values that are greater than (base^index) and - // less than or equal to (base^(index+1)). - // - // The positive and negative ranges of the histogram are expressed - // separately. Negative values are mapped by their absolute value - // into the negative range using the same scale as the positive range. - // - // scale is not restricted by the protocol, as the permissible - // values depend on the range of the data. - sint32 scale = 6; - - // zero_count is the count of values that are either exactly zero or - // within the region considered zero by the instrumentation at the - // tolerated degree of precision. This bucket stores values that - // cannot be expressed using the standard exponential formula as - // well as values that have been rounded to zero. - // - // Implementations MAY consider the zero bucket to have probability - // mass equal to (zero_count / count). - fixed64 zero_count = 7; - - // positive carries the positive range of exponential bucket counts. - Buckets positive = 8; - - // negative carries the negative range of exponential bucket counts. - Buckets negative = 9; - - // Buckets are a set of bucket counts, encoded in a contiguous array - // of counts. - message Buckets { - // Offset is the bucket index of the first entry in the bucket_counts array. - // - // Note: This uses a varint encoding as a simple form of compression. - sint32 offset = 1; - - // bucket_counts is an array of count values, where bucket_counts[i] carries - // the count of the bucket at index (offset+i). bucket_counts[i] is the count - // of values greater than base^(offset+i) and less than or equal to - // base^(offset+i+1). - // - // Note: By contrast, the explicit HistogramDataPoint uses - // fixed64. This field is expected to have many buckets, - // especially zeros, so uint64 has been selected to ensure - // varint encoding. - repeated uint64 bucket_counts = 2; - } - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 10; - - // (Optional) List of exemplars collected from - // measurements that were used to form the data point - repeated Exemplar exemplars = 11; - - // min is the minimum value over (start_time, end_time]. - optional double min = 12; - - // max is the maximum value over (start_time, end_time]. - optional double max = 13; - - // ZeroThreshold may be optionally set to convey the width of the zero - // region. Where the zero region is defined as the closed interval - // [-ZeroThreshold, ZeroThreshold]. 
- // When ZeroThreshold is 0, zero count bucket stores values that cannot be - // expressed using the standard exponential formula as well as values that - // have been rounded to zero. - double zero_threshold = 14; -} - -// SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. The count and sum fields represent -// cumulative values. -message SummaryDataPoint { - reserved 1; - - // The set of key/value pairs that uniquely identify the timeseries from - // where this point belongs. The list may be empty (may contain 0 elements). - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 7; - - // StartTimeUnixNano is optional but strongly encouraged, see the - // the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 start_time_unix_nano = 2; - - // TimeUnixNano is required, see the detailed comments above Metric. - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 3; - - // count is the number of values in the population. Must be non-negative. - fixed64 count = 4; - - // sum of the values in the population. If count is zero then this field - // must be zero. - // - // Note: Sum should only be filled out when measuring non-negative discrete - // events, and is assumed to be monotonic over the values of these events. - // Negative events *can* be recorded, but sum should not be filled out when - // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary - double sum = 5; - - // Represents the value at a given quantile of a distribution. - // - // To record Min and Max values following conventions are used: - // - The 1.0 quantile is equivalent to the maximum value observed. - // - The 0.0 quantile is equivalent to the minimum value observed. - // - // See the following issue for more context: - // https://github.com/open-telemetry/opentelemetry-proto/issues/125 - message ValueAtQuantile { - // The quantile of a distribution. Must be in the interval - // [0.0, 1.0]. - double quantile = 1; - - // The value at the given quantile of a distribution. - // - // Quantile values must NOT be negative. - double value = 2; - } - - // (Optional) list of values at different quantiles of the distribution calculated - // from the current snapshot. The quantiles must be strictly increasing. - repeated ValueAtQuantile quantile_values = 6; - - // Flags that apply to this specific data point. See DataPointFlags - // for the available flags and their meaning. - uint32 flags = 8; -} - -// A representation of an exemplar, which is a sample input measurement. -// Exemplars also hold information about the environment when the measurement -// was recorded, for example the span and trace ID of the active span when the -// exemplar was recorded. -message Exemplar { - reserved 1; - - // The set of key/value pairs that were filtered out by the aggregator, but - // recorded alongside the original measurement. 
Only key/value pairs that were - // filtered out by the aggregator should be included - repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7; - - // time_unix_nano is the exact time when this exemplar was recorded - // - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January - // 1970. - fixed64 time_unix_nano = 2; - - // The value of the measurement that was recorded. An exemplar is - // considered invalid when one of the recognized value fields is not present - // inside this oneof. - oneof value { - double as_double = 3; - sfixed64 as_int = 6; - } - - // (Optional) Span ID of the exemplar trace. - // span_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - bytes span_id = 4; - - // (Optional) Trace ID of the exemplar trace. - // trace_id may be missing if the measurement is not recorded inside a trace - // or if the trace is not sampled. - bytes trace_id = 5; -} diff --git a/src/handlers/http/otel/opentelemetry/proto/resource/v1/resource.proto b/src/handlers/http/otel/opentelemetry/proto/resource/v1/resource.proto deleted file mode 100644 index 6637560bc..000000000 --- a/src/handlers/http/otel/opentelemetry/proto/resource/v1/resource.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.resource.v1; - -import "opentelemetry/proto/common/v1/common.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Resource.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.resource.v1"; -option java_outer_classname = "ResourceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/resource/v1"; - -// Resource information. -message Resource { - // Set of attributes that describe the resource. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then - // no attributes were dropped. - uint32 dropped_attributes_count = 2; -} diff --git a/src/handlers/http/otel/opentelemetry/proto/trace/v1/trace.proto b/src/handlers/http/otel/opentelemetry/proto/trace/v1/trace.proto deleted file mode 100644 index 24442853e..000000000 --- a/src/handlers/http/otel/opentelemetry/proto/trace/v1/trace.proto +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package opentelemetry.proto.trace.v1; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Trace.V1"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.trace.v1"; -option java_outer_classname = "TraceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/trace/v1"; - -// TracesData represents the traces data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP traces data but do -// not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message TracesData { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceSpans resource_spans = 1; -} - -// A collection of ScopeSpans from a Resource. -message ResourceSpans { - reserved 1000; - - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeSpans that originate from a resource. - repeated ScopeSpans scope_spans = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_spans" field which have their own schema_url field. - string schema_url = 3; -} - -// A collection of Spans produced by an InstrumentationScope. -message ScopeSpans { - // The instrumentation scope information for the spans in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of Spans that originate from an instrumentation scope. - repeated Span spans = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. - string schema_url = 3; -} - -// A Span represents a single operation performed by a single component of the system. -// -// The next available field id is 17. -message Span { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. 
An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - bytes trace_id = 1; - - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - bytes span_id = 2; - - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - string trace_state = 3; - - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - bytes parent_span_id = 4; - - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether a span's parent - // is remote. The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // When creating span messages, if the message is logically forwarded from another source - // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - // be set to zero. - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // - // [Optional]. - fixed32 flags = 16; - - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // Empty value is equivalent to an unknown span name. - // - // This field is required. - string name = 5; - - // SpanKind is the type of span. Can be used to specify additional relationships between spans - // in addition to a parent/child relationship. - enum SpanKind { - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. - SPAN_KIND_UNSPECIFIED = 0; - - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. - SPAN_KIND_INTERNAL = 1; - - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - SPAN_KIND_SERVER = 2; - - // Indicates that the span describes a request to some remote service. 
- SPAN_KIND_CLIENT = 3; - - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. - SPAN_KIND_PRODUCER = 4; - - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. - SPAN_KIND_CONSUMER = 5; - } - - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - SpanKind kind = 6; - - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 start_time_unix_nano = 7; - - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - fixed64 end_time_unix_nano = 8; - - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "example.com/myattribute": true - // "example.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; - - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - uint32 dropped_attributes_count = 10; - - // Event is a time-stamped annotation of the span, consisting of user-supplied - // text description and key-value pairs. - message Event { - // time_unix_nano is the time the event occurred. - fixed64 time_unix_nano = 1; - - // name of the event. - // This field is semantically required to be set to non-empty string. - string name = 2; - - // attributes is a collection of attribute key/value pairs on the event. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). 
- repeated opentelemetry.proto.common.v1.KeyValue attributes = 3; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - uint32 dropped_attributes_count = 4; - } - - // events is a collection of Event items. - repeated Event events = 11; - - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. - uint32 dropped_events_count = 12; - - // A pointer from the current span to another span in the same trace or in a - // different trace. For example, this can be used in batching operations, - // where a single batch handler processes multiple requests from different - // traces or when the handler receives a request from a different project. - message Link { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - bytes trace_id = 1; - - // A unique identifier for the linked span. The ID is an 8-byte array. - bytes span_id = 2; - - // The trace_state associated with the link. - string trace_state = 3; - - // attributes is a collection of attribute key/value pairs on the link. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; - - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - uint32 dropped_attributes_count = 5; - - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether the link is remote. - // The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - // - // [Optional]. - fixed32 flags = 6; - } - - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. - repeated Link links = 13; - - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - uint32 dropped_links_count = 14; - - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). - Status status = 15; -} - -// The Status type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -message Status { - reserved 1; - - // A developer-facing human readable error message. - string message = 2; - - // For the semantics of status codes see - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status - enum StatusCode { - // The default status. - STATUS_CODE_UNSET = 0; - // The Span has been validated by an Application developer or Operator to - // have completed successfully. - STATUS_CODE_OK = 1; - // The Span contains an error. 
-    STATUS_CODE_ERROR = 2;
-  };
-
-  // The status code.
-  StatusCode code = 3;
-}
-
-// SpanFlags represents constants used to interpret the
-// Span.flags field, which is protobuf 'fixed32' type and is to
-// be used as bit-fields. Each non-zero value defined in this enum is
-// a bit-mask. To extract the bit-field, for example, use an
-// expression like:
-//
-//   (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
-//
-// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
-//
-// Note that Span flags were introduced in version 1.1 of the
-// OpenTelemetry protocol. Older Span producers do not set this
-// field, consequently consumers should not rely on the absence of a
-// particular flag bit to indicate the presence of a particular feature.
-enum SpanFlags {
-  // The zero value for the enum. Should not be used for comparisons.
-  // Instead use bitwise "and" with the appropriate mask as shown above.
-  SPAN_FLAGS_DO_NOT_USE = 0;
-
-  // Bits 0-7 are used for trace flags.
-  SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF;
-
-  // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
-  // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
-  // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
-  SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100;
-  SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200;
-
-  // Bits 10-31 are reserved for future use.
-}
diff --git a/src/handlers/http/otel/proto.rs b/src/handlers/http/otel/proto.rs
deleted file mode 100644
index f734c5bc8..000000000
--- a/src/handlers/http/otel/proto.rs
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Parseable Server (C) 2022 - 2024 Parseable, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- *
- */
-
-/// Common types used across all event types.
-pub mod common {
-    pub mod v1 {
-        include!("compiled_protos/opentelemetry.proto.common.v1.rs");
-    }
-}
-
-/// Generated types used for logs.
-pub mod logs {
-    pub mod v1 {
-        include!("compiled_protos/opentelemetry.proto.logs.v1.rs");
-    }
-}
-
-/// Generated types used in resources.
-pub mod resource {
-    pub mod v1 {
-        include!("compiled_protos/opentelemetry.proto.resource.v1.rs");
-    }
-}
-
-/// Generated types used in metrics.
-pub mod metrics {
-    pub mod v1 {
-        include!("compiled_protos/opentelemetry.proto.metrics.v1.rs");
-    }
-}
-
-/// Generated types used in traces.
-pub mod trace {
-    pub mod v1 {
-        include!("compiled_protos/opentelemetry.proto.trace.v1.rs");
-    }
-}
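With the vendored prost output deleted, the handlers pull the same generated types from the opentelemetry-proto crate instead, as the traces.rs diff below shows. A minimal sketch of the import migration, assuming opentelemetry-proto 0.27 with its serde support enabled (the handlers rely on it to deserialize OTLP/JSON via serde_json; `parse_traces` is a hypothetical helper, not part of this change):

    // Before: types came from the vendored, prost-generated modules.
    // use super::proto::trace::v1::TracesData;

    // After: the same message types come from the opentelemetry-proto crate.
    use opentelemetry_proto::tonic::trace::v1::TracesData;

    // OTLP/JSON can then be deserialized directly, as flatten_otel_traces
    // does below with serde_json::from_str.
    fn parse_traces(body: &str) -> serde_json::Result<TracesData> {
        serde_json::from_str(body)
    }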
diff --git a/src/handlers/http/otel/traces.rs b/src/handlers/http/otel/traces.rs
index 09084b503..1f7e736ea 100644
--- a/src/handlers/http/otel/traces.rs
+++ b/src/handlers/http/otel/traces.rs
@@ -16,41 +16,40 @@
  *
  */
 
-use super::insert_attributes;
-use super::insert_if_some;
-use super::insert_number_if_some;
-use super::proto::trace::v1::span::Event;
-use super::proto::trace::v1::span::Link;
-use super::proto::trace::v1::ScopeSpans;
-use super::proto::trace::v1::Span;
-use super::proto::trace::v1::Status;
-use super::proto::trace::v1::TracesData;
 use bytes::Bytes;
+use opentelemetry_proto::tonic::trace::v1::span::Event;
+use opentelemetry_proto::tonic::trace::v1::span::Link;
+use opentelemetry_proto::tonic::trace::v1::ScopeSpans;
+use opentelemetry_proto::tonic::trace::v1::Span;
+use opentelemetry_proto::tonic::trace::v1::Status;
+use opentelemetry_proto::tonic::trace::v1::TracesData;
 use serde_json::Value;
 use std::collections::BTreeMap;
 
+use super::insert_attributes;
+
 /// this function flattens the `ScopeSpans` object
 /// and returns a `Vec` of `BTreeMap` of the flattened json
 fn flatten_scope_span(scope_span: &ScopeSpans) -> Vec<BTreeMap<String, Value>> {
     let mut vec_scope_span_json = Vec::new();
     let mut scope_span_json = BTreeMap::new();
 
-    if let Some(spans) = &scope_span.spans {
-        for span in spans {
-            let span_record_json = flatten_span_record(span);
-            vec_scope_span_json.extend(span_record_json);
-        }
+    for span in &scope_span.spans {
+        let span_record_json = flatten_span_record(span);
+        vec_scope_span_json.extend(span_record_json);
     }
 
     if let Some(scope) = &scope_span.scope {
-        insert_if_some(&mut scope_span_json, "scope_name", &scope.name);
-        insert_if_some(&mut scope_span_json, "scope_version", &scope.version);
+        scope_span_json.insert("scope_name".to_string(), Value::String(scope.name.clone()));
+        scope_span_json.insert(
+            "scope_version".to_string(),
+            Value::String(scope.version.clone()),
+        );
         insert_attributes(&mut scope_span_json, &scope.attributes);
-        insert_number_if_some(
-            &mut scope_span_json,
-            "scope_dropped_attributes_count",
-            &scope.dropped_attributes_count.map(|f| f as f64),
+        scope_span_json.insert(
+            "scope_dropped_attributes_count".to_string(),
+            Value::Number(scope.dropped_attributes_count.into()),
        );
 
         for span_json in &mut vec_scope_span_json {
@@ -60,10 +59,11 @@ fn flatten_scope_span(scope_span: &ScopeSpans) -> Vec<BTreeMap<String, Value>> {
         }
     }
 
-    if let Some(schema_url) = &scope_span.schema_url {
-        for span_json in &mut vec_scope_span_json {
-            span_json.insert("schema_url".to_string(), Value::String(schema_url.clone()));
-        }
+    for span_json in &mut vec_scope_span_json {
+        span_json.insert(
+            "schema_url".to_string(),
+            Value::String(scope_span.schema_url.clone()),
+        );
     }
 
     vec_scope_span_json
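A behavioural detail worth noting in the rewritten `flatten_scope_span`: `spans` and `schema_url` are now plain fields rather than `Option`s, and scope metadata is only copied onto flattened span records, so a `ScopeSpans` with an empty `spans` list yields no output rows at all. A small in-module test sketch of that behaviour (field layout from opentelemetry-proto 0.27; the test itself is illustrative and not part of this change):

    #[cfg(test)]
    mod scope_span_sketch {
        use opentelemetry_proto::tonic::common::v1::InstrumentationScope;
        use opentelemetry_proto::tonic::trace::v1::ScopeSpans;

        use super::flatten_scope_span;

        #[test]
        fn scope_metadata_needs_at_least_one_span() {
            let scope_span = ScopeSpans {
                scope: Some(InstrumentationScope {
                    name: "my-lib".to_string(),
                    version: "0.1.0".to_string(),
                    attributes: vec![],
                    dropped_attributes_count: 0,
                }),
                spans: vec![], // no spans, so nothing carries the scope fields
                schema_url: String::new(),
            };
            assert!(flatten_scope_span(&scope_span).is_empty());
        }
    }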
@@ -76,41 +76,35 @@ pub fn flatten_otel_traces(body: &Bytes) -> Vec<BTreeMap<String, Value>> {
     let message: TracesData = serde_json::from_str(body_str).unwrap();
     let mut vec_otel_json = Vec::new();
 
-    if let Some(records) = &message.resource_spans {
-        for record in records {
-            let mut resource_span_json = BTreeMap::new();
+    for record in &message.resource_spans {
+        let mut resource_span_json = BTreeMap::new();
 
-            if let Some(resource) = &record.resource {
-                insert_attributes(&mut resource_span_json, &resource.attributes);
-                insert_number_if_some(
-                    &mut resource_span_json,
-                    "resource_dropped_attributes_count",
-                    &resource.dropped_attributes_count.map(|f| f as f64),
-                );
-            }
+        if let Some(resource) = &record.resource {
+            insert_attributes(&mut resource_span_json, &resource.attributes);
+            resource_span_json.insert(
+                "resource_dropped_attributes_count".to_string(),
+                Value::Number(resource.dropped_attributes_count.into()),
+            );
+        }
 
-            let mut vec_resource_spans_json = Vec::new();
-            if let Some(scope_spans) = &record.scope_spans {
-                for scope_span in scope_spans {
-                    let scope_span_json = flatten_scope_span(scope_span);
-                    vec_resource_spans_json.extend(scope_span_json);
-                }
-            }
+        let mut vec_resource_spans_json = Vec::new();
+        for scope_span in &record.scope_spans {
+            let scope_span_json = flatten_scope_span(scope_span);
+            vec_resource_spans_json.extend(scope_span_json);
+        }
 
-            insert_if_some(
-                &mut resource_span_json,
-                "resource_span_schema_url",
-                &record.schema_url,
-            );
+        resource_span_json.insert(
+            "schema_url".to_string(),
+            Value::String(record.schema_url.clone()),
+        );
 
-            for resource_spans_json in &mut vec_resource_spans_json {
-                for (key, value) in &resource_span_json {
-                    resource_spans_json.insert(key.clone(), value.clone());
-                }
+        for resource_spans_json in &mut vec_resource_spans_json {
+            for (key, value) in &resource_span_json {
+                resource_spans_json.insert(key.clone(), value.clone());
             }
-
-            vec_otel_json.extend(vec_resource_spans_json);
         }
+
+        vec_otel_json.extend(vec_resource_spans_json);
     }
 
     vec_otel_json
@@ -124,17 +118,15 @@ fn flatten_events(events: &[Event]) -> Vec<BTreeMap<String, Value>> {
        .iter()
        .map(|event| {
            let mut event_json = BTreeMap::new();
-            insert_if_some(
-                &mut event_json,
-                "event_time_unix_nano",
-                &event.time_unix_nano,
+            event_json.insert(
+                "event_time_unix_nano".to_string(),
+                Value::Number(event.time_unix_nano.into()),
             );
-            insert_if_some(&mut event_json, "event_name", &event.name);
+            event_json.insert("event_name".to_string(), Value::String(event.name.clone()));
             insert_attributes(&mut event_json, &event.attributes);
-            insert_number_if_some(
-                &mut event_json,
-                "event_dropped_attributes_count",
-                &event.dropped_attributes_count.map(|f| f as f64),
+            event_json.insert(
+                "event_dropped_attributes_count".to_string(),
+                Value::Number(event.dropped_attributes_count.into()),
             );
             event_json
         })
@@ -149,13 +141,19 @@ fn flatten_links(links: &[Link]) -> Vec<BTreeMap<String, Value>> {
        .iter()
        .map(|link| {
            let mut link_json = BTreeMap::new();
-            insert_if_some(&mut link_json, "link_trace_id", &link.trace_id);
-            insert_if_some(&mut link_json, "link_span_id", &link.span_id);
+            link_json.insert(
+                "link_span_id".to_string(),
+                Value::String(hex::encode(&link.span_id)),
+            );
+            link_json.insert(
+                "link_trace_id".to_string(),
+                Value::String(hex::encode(&link.trace_id)),
+            );
+
             insert_attributes(&mut link_json, &link.attributes);
-            insert_number_if_some(
-                &mut link_json,
-                "link_dropped_attributes_count",
-                &link.dropped_attributes_count.map(|f| f as f64),
+            link_json.insert(
+                "link_dropped_attributes_count".to_string(),
+                Value::Number(link.dropped_attributes_count.into()),
             );
             link_json
         })
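The link (and, below, span) ids are raw byte vectors in the tonic types, so the rewrite renders them with `hex::encode`, which produces lowercase hex and simply passes through an empty or all-zero id, which the OTLP comments above define as invalid, rather than rejecting it. For instance:

    // hex::encode accepts anything AsRef<[u8]>; an 8-byte span id becomes
    // a 16-character lowercase hex string.
    let span_id: [u8; 8] = [0x4f, 0xd4, 0x8c, 0x21, 0x3a, 0xe9, 0x5a, 0x01];
    assert_eq!(hex::encode(span_id), "4fd48c213ae95a01");
    // Invalid (empty) ids surface as empty strings in the flattened output:
    assert_eq!(hex::encode(Vec::<u8>::new()), "");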
"STATUS_CODE_ERROR", + _ => "", + }; + status_json.insert( + "span_status_description".to_string(), + Value::String(description.to_string()), ); - - if let Some(code) = status.code { - let description = match code { - 0 => "STATUS_CODE_UNSET", - 1 => "STATUS_CODE_OK", - 2 => "STATUS_CODE_ERROR", - _ => "", - }; - status_json.insert( - "span_status_description".to_string(), - Value::String(description.to_string()), - ); - } status_json } @@ -195,23 +192,20 @@ fn flatten_status(status: &Status) -> BTreeMap { /// there is a mapping of flags to flags description provided in proto /// this function fetches the flags description from the flags /// and adds it to the flattened json -fn flatten_flags(flags: &Option) -> BTreeMap { +fn flatten_flags(flags: u32) -> BTreeMap { let mut flags_json = BTreeMap::new(); - insert_number_if_some(&mut flags_json, "span_flags", &flags.map(|f| f as f64)); - - if let Some(flag) = flags { - let description = match flag { - 0 => "SPAN_FLAGS_DO_NOT_USE", - 255 => "SPAN_FLAGS_TRACE_FLAGS_MASK", - 256 => "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", - 512 => "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", - _ => "", - }; - flags_json.insert( - "span_flags_description".to_string(), - Value::String(description.to_string()), - ); - } + flags_json.insert("span_flags".to_string(), Value::Number(flags.into())); + let description = match flags { + 0 => "SPAN_FLAGS_DO_NOT_USE", + 255 => "SPAN_FLAGS_TRACE_FLAGS_MASK", + 256 => "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", + 512 => "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", + _ => "", + }; + flags_json.insert( + "span_flags_description".to_string(), + Value::String(description.to_string()), + ); flags_json } @@ -220,25 +214,22 @@ fn flatten_flags(flags: &Option) -> BTreeMap { /// there is a mapping of kind to kind description provided in proto /// this function fetches the kind description from the kind /// and adds it to the flattened json -fn flatten_kind(kind: &Option) -> BTreeMap { +fn flatten_kind(kind: i32) -> BTreeMap { let mut kind_json = BTreeMap::new(); - insert_number_if_some(&mut kind_json, "span_kind", &kind.map(|k| k as f64)); - - if let Some(kind) = kind { - let description = match kind { - 0 => "SPAN_KIND_UNSPECIFIED", - 1 => "SPAN_KIND_INTERNAL", - 2 => "SPAN_KIND_SERVER", - 3 => "SPAN_KIND_CLIENT", - 4 => "SPAN_KIND_PRODUCER", - 5 => "SPAN_KIND_CONSUMER", - _ => "", - }; - kind_json.insert( - "span_kind_description".to_string(), - Value::String(description.to_string()), - ); - } + kind_json.insert("span_kind".to_string(), Value::Number(kind.into())); + let description = match kind { + 0 => "SPAN_KIND_UNSPECIFIED", + 1 => "SPAN_KIND_INTERNAL", + 2 => "SPAN_KIND_SERVER", + 3 => "SPAN_KIND_CLIENT", + 4 => "SPAN_KIND_PRODUCER", + 5 => "SPAN_KIND_CONSUMER", + _ => "", + }; + kind_json.insert( + "span_kind_description".to_string(), + Value::String(description.to_string()), + ); kind_json } @@ -250,57 +241,50 @@ fn flatten_span_record(span_record: &Span) -> Vec> { let mut span_records_json = Vec::new(); let mut span_record_json = BTreeMap::new(); - insert_if_some( - &mut span_record_json, - "span_trace_id", - &span_record.trace_id, + span_record_json.insert( + "span_trace_id".to_string(), + Value::String(hex::encode(&span_record.span_id)), + ); + span_record_json.insert( + "span_span_id".to_string(), + Value::String(hex::encode(&span_record.trace_id)), ); - insert_if_some(&mut span_record_json, "span_span_id", &span_record.span_id); - insert_if_some( - &mut span_record_json, - "span_trace_state", - &span_record.trace_state, + 
@@ -250,57 +241,50 @@ fn flatten_span_record(span_record: &Span) -> Vec<BTreeMap<String, Value>> {
     let mut span_records_json = Vec::new();
     let mut span_record_json = BTreeMap::new();
 
-    insert_if_some(
-        &mut span_record_json,
-        "span_trace_id",
-        &span_record.trace_id,
+    span_record_json.insert(
+        "span_trace_id".to_string(),
+        Value::String(hex::encode(&span_record.trace_id)),
+    );
+    span_record_json.insert(
+        "span_span_id".to_string(),
+        Value::String(hex::encode(&span_record.span_id)),
     );
-    insert_if_some(&mut span_record_json, "span_span_id", &span_record.span_id);
-    insert_if_some(
-        &mut span_record_json,
-        "span_trace_state",
-        &span_record.trace_state,
+    span_record_json.insert(
+        "span_trace_state".to_string(),
+        Value::String(span_record.trace_state.clone()),
    );
-    insert_if_some(
-        &mut span_record_json,
-        "span_parent_span_id",
-        &span_record.parent_span_id,
+    span_record_json.insert(
+        "span_parent_span_id".to_string(),
+        Value::String(hex::encode(&span_record.parent_span_id)),
     );
-    span_record_json.extend(flatten_flags(&span_record.flags));
-    insert_if_some(&mut span_record_json, "span_name", &span_record.name);
-    span_record_json.extend(flatten_kind(&span_record.kind));
-    insert_if_some(
-        &mut span_record_json,
-        "span_start_time_unix_nano",
-        &span_record.start_time_unix_nano,
+    span_record_json.extend(flatten_flags(span_record.flags));
+    span_record_json.insert(
+        "span_name".to_string(),
+        Value::String(span_record.name.clone()),
     );
-    insert_if_some(
-        &mut span_record_json,
-        "span_end_time_unix_nano",
-        &span_record.end_time_unix_nano,
+    span_record_json.extend(flatten_kind(span_record.kind));
+    span_record_json.insert(
+        "span_start_time_unix_nano".to_string(),
+        Value::Number(span_record.start_time_unix_nano.into()),
+    );
+    span_record_json.insert(
+        "span_end_time_unix_nano".to_string(),
+        Value::Number(span_record.end_time_unix_nano.into()),
     );
     insert_attributes(&mut span_record_json, &span_record.attributes);
-    insert_if_some(
-        &mut span_record_json,
-        "span_dropped_attributes_count",
-        &span_record.dropped_attributes_count,
+    span_record_json.insert(
+        "span_dropped_attributes_count".to_string(),
+        Value::Number(span_record.dropped_attributes_count.into()),
     );
-    if let Some(events) = &span_record.events {
-        span_records_json.extend(flatten_events(events));
-    }
-    insert_number_if_some(
-        &mut span_record_json,
-        "span_dropped_events_count",
-        &span_record.dropped_events_count.map(|f| f as f64),
+    span_records_json.extend(flatten_events(&span_record.events));
+    span_record_json.insert(
+        "span_dropped_events_count".to_string(),
+        Value::Number(span_record.dropped_events_count.into()),
     );
-    if let Some(links) = &span_record.links {
-        span_records_json.extend(flatten_links(links));
-    }
-
-    insert_number_if_some(
-        &mut span_record_json,
-        "span_dropped_links_count",
-        &span_record.dropped_links_count.map(|f| f as f64),
+    span_records_json.extend(flatten_links(&span_record.links));
+    span_record_json.insert(
+        "span_dropped_links_count".to_string(),
+        Value::Number(span_record.dropped_links_count.into()),
     );
 
     if let Some(status) = &span_record.status {
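// Sketch, not part of the patch: end-to-end shape of the entry point after
// the migration, assuming opentelemetry-proto's serde support so that
// `TracesData` deserializes from OTLP/JSON (camelCase field names):
fn flatten_otel_traces_sketch() {
    use bytes::Bytes;

    let body = Bytes::from(r#"{"resourceSpans": []}"#);
    // An empty resourceSpans list flattens to zero rows.
    assert!(flatten_otel_traces(&body).is_empty());
}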