diff --git a/Cargo.lock b/Cargo.lock
index 554ef652b..e87727e47 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1993,6 +1993,12 @@ version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
+[[package]]
+name = "futures-timer"
+version = "3.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24"
+
[[package]]
name = "futures-util"
version = "0.3.30"
@@ -2685,6 +2691,12 @@ dependencies = [
"libc",
]
+[[package]]
+name = "maplit"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d"
+
[[package]]
name = "matchers"
version = "0.1.0"
@@ -3008,6 +3020,52 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
+[[package]]
+name = "opentelemetry"
+version = "0.27.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab70038c28ed37b97d8ed414b6429d343a8bbf44c9f79ec854f3a643029ba6d7"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "js-sys",
+ "pin-project-lite",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "opentelemetry-proto"
+version = "0.27.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6e05acbfada5ec79023c85368af14abd0b307c015e9064d249b2a950ef459a6"
+dependencies = [
+ "hex",
+ "opentelemetry",
+ "opentelemetry_sdk",
+ "prost",
+ "serde",
+ "tonic",
+]
+
+[[package]]
+name = "opentelemetry_sdk"
+version = "0.27.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "231e9d6ceef9b0b2546ddf52335785ce41252bc7474ee8ba05bfad277be13ab8"
+dependencies = [
+ "async-trait",
+ "futures-channel",
+ "futures-executor",
+ "futures-util",
+ "glob",
+ "opentelemetry",
+ "percent-encoding",
+ "rand",
+ "serde_json",
+ "thiserror",
+]
+
[[package]]
name = "ordered-float"
version = "2.10.1"
@@ -3108,6 +3166,7 @@ dependencies = [
"actix-web-static-files",
"anyhow",
"argon2",
+ "arrow",
"arrow-array",
"arrow-flight",
"arrow-ipc",
@@ -3141,12 +3200,14 @@ dependencies = [
"humantime-serde",
"itertools 0.13.0",
"lazy_static",
+ "maplit",
"mime",
"nom",
"num_cpus",
"object_store",
"once_cell",
"openid",
+ "opentelemetry-proto",
"parquet",
"path-clean",
"prometheus",
@@ -3158,6 +3219,7 @@ dependencies = [
"regex",
"relative-path",
"reqwest 0.11.27",
+ "rstest",
"rustls 0.22.4",
"rustls-pemfile 2.1.2",
"semver",
@@ -3823,6 +3885,36 @@ dependencies = [
"windows-sys 0.52.0",
]
+[[package]]
+name = "rstest"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0a2c585be59b6b5dd66a9d2084aa1d8bd52fbdb806eafdeffb52791147862035"
+dependencies = [
+ "futures",
+ "futures-timer",
+ "rstest_macros",
+ "rustc_version",
+]
+
+[[package]]
+name = "rstest_macros"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "825ea780781b15345a146be27eaefb05085e337e869bff01b4306a4fd4a9ad5a"
+dependencies = [
+ "cfg-if",
+ "glob",
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "relative-path",
+ "rustc_version",
+ "syn 2.0.79",
+ "unicode-ident",
+]
+
[[package]]
name = "rustc-demangle"
version = "0.1.23"
diff --git a/Cargo.toml b/Cargo.toml
index b33238052..26d35460d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -106,6 +106,7 @@ prost = "0.13.3"
prometheus-parse = "0.2.5"
sha2 = "0.10.8"
tracing = "0.1.41"
+opentelemetry-proto = "0.27.0"
[build-dependencies]
cargo_toml = "0.20.1"
diff --git a/src/handlers/http/modal/utils/ingest_utils.rs b/src/handlers/http/modal/utils/ingest_utils.rs
index 62344d551..12afc33fc 100644
--- a/src/handlers/http/modal/utils/ingest_utils.rs
+++ b/src/handlers/http/modal/utils/ingest_utils.rs
@@ -16,16 +16,15 @@
*
*/
-use std::{
-    collections::{BTreeMap, HashMap},
-    sync::Arc,
-};
-
use actix_web::HttpRequest;
use arrow_schema::Field;
use bytes::Bytes;
use chrono::{DateTime, NaiveDateTime, Utc};
use serde_json::Value;
+use std::{
+    collections::{BTreeMap, HashMap},
+    sync::Arc,
+};
use crate::{
event::{
diff --git a/src/handlers/http/otel.rs b/src/handlers/http/otel.rs
index de4b2658c..d199d11ad 100644
--- a/src/handlers/http/otel.rs
+++ b/src/handlers/http/otel.rs
@@ -15,83 +15,92 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
+
pub mod logs;
pub mod metrics;
-#[allow(clippy::all)]
-pub mod proto;
pub mod traces;
-use proto::common::v1::KeyValue;
+use opentelemetry_proto::tonic::common::v1::{any_value::Value as OtelValue, AnyValue, KeyValue};
use serde_json::Value;
use std::collections::BTreeMap;
// Value can be one of the types: String, Bool, Int, Double, ArrayValue, KeyValueList, or Bytes
-pub fn collect_json_from_any_value(
-    key: &String,
-    value: super::otel::proto::common::v1::Value,
-) -> BTreeMap<String, Value> {
+pub fn collect_json_from_value(key: &String, value: OtelValue) -> BTreeMap<String, Value> {
    let mut value_json: BTreeMap<String, Value> = BTreeMap::new();
-    insert_if_some(&mut value_json, key, &value.str_val);
-    insert_bool_if_some(&mut value_json, key, &value.bool_val);
-    insert_if_some(&mut value_json, key, &value.int_val);
-    insert_number_if_some(&mut value_json, key, &value.double_val);
-
-    //ArrayValue is a vector of AnyValue
-    //traverse by recursively calling the same function
-    if value.array_val.is_some() {
-        let array_val = value.array_val.as_ref().unwrap();
-        let values = &array_val.values;
-        for value in values {
-            let array_value_json = collect_json_from_any_value(key, value.clone());
-            for key in array_value_json.keys() {
-                value_json.insert(
-                    format!(
-                        "{}_{}",
-                        key.to_owned(),
-                        value_to_string(array_value_json[key].to_owned())
-                    ),
-                    array_value_json[key].to_owned(),
-                );
+    match value {
+        OtelValue::StringValue(str_val) => {
+            value_json.insert(key.to_string(), Value::String(str_val));
+        }
+        OtelValue::BoolValue(bool_val) => {
+            value_json.insert(key.to_string(), Value::Bool(bool_val));
+        }
+        OtelValue::IntValue(int_val) => {
+            value_json.insert(key.to_string(), Value::String(int_val.to_string()));
+        }
+        OtelValue::DoubleValue(double_val) => {
+            if let Some(number) = serde_json::Number::from_f64(double_val) {
+                value_json.insert(key.to_string(), Value::Number(number));
            }
        }
-    }
-
-    //KeyValueList is a vector of KeyValue
-    //traverse through each element in the vector
-    if value.kv_list_val.is_some() {
-        let kv_list_val = value.kv_list_val.unwrap();
-        for key_value in kv_list_val.values {
-            let value = key_value.value;
-            if value.is_some() {
-                let value = value.unwrap();
-                let key_value_json = collect_json_from_any_value(key, value);
-
-                for key in key_value_json.keys() {
+        OtelValue::ArrayValue(array_val) => {
+            let values = &array_val.values;
+            for value in values {
+                let array_value_json = collect_json_from_anyvalue(key, value.clone());
+                for key in array_value_json.keys() {
                    value_json.insert(
                        format!(
-                            "{}_{}_{}",
+                            "{}_{}",
                            key.to_owned(),
-                            key_value.key,
-                            value_to_string(key_value_json[key].to_owned())
+                            value_to_string(array_value_json[key].to_owned())
                        ),
-                        key_value_json[key].to_owned(),
+                        array_value_json[key].to_owned(),
                    );
                }
            }
        }
+        OtelValue::KvlistValue(kv_list_val) => {
+            for key_value in kv_list_val.values {
+                if let Some(value) = key_value.value {
+                    let key_value_json = collect_json_from_anyvalue(key, value);
+
+                    for key in key_value_json.keys() {
+                        value_json.insert(
+                            format!(
+                                "{}_{}_{}",
+                                key.to_owned(),
+                                key_value.key,
+                                value_to_string(key_value_json[key].to_owned())
+                            ),
+                            key_value_json[key].to_owned(),
+                        );
+                    }
+                }
+            }
+        }
+        OtelValue::BytesValue(bytes_val) => {
+            value_json.insert(
+                key.to_string(),
+                Value::String(String::from_utf8_lossy(&bytes_val).to_string()),
+            );
+        }
+    }
-    insert_if_some(&mut value_json, key, &value.bytes_val);
    value_json
}
+pub fn collect_json_from_anyvalue(key: &String, value: AnyValue) -> BTreeMap<String, Value> {
+    // An AnyValue may legitimately be empty; yield an empty map instead of panicking.
+    value.value.map(|value| collect_json_from_value(key, value)).unwrap_or_default()
+}
+
//traverse through each Value by calling collect_json_from_anyvalue
pub fn collect_json_from_values(
-    values: &Option<super::otel::proto::common::v1::Value>,
+    values: &Option<AnyValue>,
    key: &String,
) -> BTreeMap<String, Value> {
    let mut value_json: BTreeMap<String, Value> = BTreeMap::new();
    for value in values.iter() {
-        value_json = collect_json_from_any_value(key, value.clone());
+        value_json = collect_json_from_anyvalue(key, value.clone());
}
value_json
@@ -142,11 +151,9 @@ pub fn insert_bool_if_some(map: &mut BTreeMap<String, Value>, key: &str, option: &Option<bool>) {
}
}
-pub fn insert_attributes(map: &mut BTreeMap<String, Value>, attributes: &Option<Vec<KeyValue>>) {
-    if let Some(attrs) = attributes {
-        let attributes_json = flatten_attributes(attrs);
-        for (key, value) in attributes_json {
-            map.insert(key, value);
-        }
+pub fn insert_attributes(map: &mut BTreeMap<String, Value>, attributes: &Vec<KeyValue>) {
+    let attributes_json = flatten_attributes(attributes);
+    for (key, value) in attributes_json {
+        map.insert(key, value);
    }
}
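
End to end, the rewritten helpers flatten a nested OTLP attribute into flat JSON keys. A usage sketch against the new `opentelemetry_proto` types, assumed to sit in this same module; the flat `service -> "parseable"` output assumes `flatten_attributes` keys entries by attribute name, as the previous implementation did:

    use opentelemetry_proto::tonic::common::v1::{any_value::Value as OtelValue, AnyValue, KeyValue};
    use std::collections::BTreeMap;

    fn demo() {
        let attr = KeyValue {
            key: "service".to_string(),
            value: Some(AnyValue {
                value: Some(OtelValue::StringValue("parseable".to_string())),
            }),
        };
        // insert_attributes now takes the Vec<KeyValue> directly; callers
        // unwrap the Option at the call site instead.
        let mut map = BTreeMap::new();
        insert_attributes(&mut map, &vec![attr]);
        assert_eq!(map["service"], serde_json::Value::String("parseable".into()));
    }
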
diff --git a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.common.v1.rs b/src/handlers/http/otel/compiled_protos/opentelemetry.proto.common.v1.rs
deleted file mode 100644
index bc40d0720..000000000
--- a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.common.v1.rs
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Parseable Server (C) 2022 - 2024 Parseable, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- *
- */
-
- // This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation.
- // All the repeated fields were changed to Option<Vec<T>> and the `oneof` fields were changed to Option.
-
- use serde::{Deserialize, Serialize};
- #[derive(Serialize, Deserialize, Debug, Clone)]
- /// AnyValue is used to represent any type of attribute value. AnyValue may contain a
- /// primitive value such as a string or integer or it may contain an arbitrary nested
- /// object containing arrays, key-value lists and primitives.
- pub struct AnyValue {
- /// The value is one of the listed fields. It is valid for all values to be unspecified
- /// in which case this AnyValue is considered to be "empty".
- pub value: Value,
- }
-
- #[derive(Serialize, Deserialize, Debug, Clone)]
- pub struct Value {
- #[serde(rename = "stringValue")]
- pub str_val: Option<String>,
- #[serde(rename = "boolValue")]
- pub bool_val: Option<bool>,
- #[serde(rename = "intValue")]
- pub int_val: Option<String>,
- #[serde(rename = "doubleValue")]
- pub double_val: Option<f64>,
- #[serde(rename = "arrayValue")]
- pub array_val: Option<ArrayValue>,
- #[serde(rename = "keyVauleList")]
- pub kv_list_val: Option<KeyValueList>,
- #[serde(rename = "bytesValue")]
- pub bytes_val: Option<String>,
- }
-
- #[derive(Serialize, Deserialize, Debug, Clone)]
- /// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
- /// since oneof in AnyValue does not allow repeated fields.
- pub struct ArrayValue {
- /// Array of values. The array may be empty (contain 0 elements).
- pub values: Vec<Value>,
- }
-
- #[derive(Serialize, Deserialize, Debug, Clone)]
- /// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
- /// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
- /// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
- /// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
- /// are semantically equivalent.
- pub struct KeyValueList {
- /// A collection of key/value pairs of key-value pairs. The list may be empty (may
- /// contain 0 elements).
- /// The keys MUST be unique (it is not allowed to have more than one
- /// value with the same key).
- pub values: Vec<KeyValue>,
- }
-
- #[derive(Serialize, Deserialize, Debug, Clone)]
- /// KeyValue is a key-value pair that is used to store Span attributes, Link
- /// attributes, etc.
- pub struct KeyValue {
- pub key: String,
- pub value: Option<Value>,
- }
-
- #[derive(Serialize, Deserialize, Debug)]
- /// InstrumentationScope is a message representing the instrumentation scope information
- /// such as the fully qualified name and version.
- pub struct InstrumentationScope {
- /// An empty instrumentation scope name means the name is unknown.
- pub name: Option<String>,
- pub version: Option<String>,
- /// Additional attributes that describe the scope. \[Optional\].
- /// Attribute keys MUST be unique (it is not allowed to have more than one
- /// attribute with the same key).
- pub attributes: Option<Vec<KeyValue>>,
- #[serde(rename = "droppedAttributesCount")]
- pub dropped_attributes_count: Option<u32>,
- }
-
\ No newline at end of file
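
The deleted `Value` above modeled the protobuf `oneof` as seven parallel `Option` fields; the crate's generated `AnyValue` models it as a true enum, which is what makes the exhaustive `match` in `collect_json_from_value` possible. A minimal comparison sketch:

    use opentelemetry_proto::tonic::common::v1::{any_value::Value as OtelValue, AnyValue};

    fn main() {
        let v = AnyValue {
            value: Some(OtelValue::IntValue(42)),
        };
        match v.value {
            Some(OtelValue::IntValue(i)) => println!("int: {i}"),
            Some(other) => println!("other variant: {other:?}"),
            None => println!("empty AnyValue"), // valid per the OTLP spec
        }
    }
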
diff --git a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.logs.v1.rs b/src/handlers/http/otel/compiled_protos/opentelemetry.proto.logs.v1.rs
deleted file mode 100644
index dc63286e3..000000000
--- a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.logs.v1.rs
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Parseable Server (C) 2022 - 2024 Parseable, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- *
- */
-
-// This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation.
- // All the repeated fields were changed to Option<Vec<T>>.
-
- use crate::handlers::http::otel::proto::common::v1::InstrumentationScope;
- use crate::handlers::http::otel::proto::common::v1::KeyValue;
- use crate::handlers::http::otel::proto::common::v1::Value;
- use crate::handlers::http::otel::proto::resource::v1::Resource;
- use serde::{Deserialize, Serialize};
-
- #[derive(Serialize, Deserialize, Debug)]
- /// LogsData represents the logs data that can be stored in a persistent storage,
- /// OR can be embedded by other protocols that transfer OTLP logs data but do not
- /// implement the OTLP protocol.
- ///
- /// The main difference between this message and collector protocol is that
- /// in this message there will not be any "control" or "metadata" specific to
- /// OTLP protocol.
- ///
- /// When new fields are added into this message, the OTLP request MUST be updated
- /// as well.
- pub struct LogsData {
- /// An array of ResourceLogs.
- /// For data coming from a single resource this array will typically contain
- /// one element. Intermediary nodes that receive data from multiple origins
- /// typically batch the data before forwarding further and in that case this
- /// array will contain multiple elements.
- #[serde(rename = "resourceLogs")]
- pub resource_logs: Option<Vec<ResourceLogs>>,
- }
-
- #[derive(Serialize, Deserialize, Debug)]
- /// A collection of ScopeLogs from a Resource.
- pub struct ResourceLogs {
- /// The resource for the logs in this message.
- /// If this field is not set then resource info is unknown.
- pub resource: Option<Resource>,
- /// A list of ScopeLogs that originate from a resource.
- #[serde(rename = "scopeLogs")]
- pub scope_logs: Option<Vec<ScopeLogs>>,
- /// This schema_url applies to the data in the "resource" field. It does not apply
- /// to the data in the "scope_logs" field which have their own schema_url field.
- #[serde(rename = "schemaUrl")]
- pub schema_url: Option<String>,
- }
-
- #[derive(Serialize, Deserialize, Debug)]
- /// A collection of Logs produced by a Scope.
- pub struct ScopeLogs {
- /// The instrumentation scope information for the logs in this message.
- /// Semantically when InstrumentationScope isn't set, it is equivalent with
- /// an empty instrumentation scope name (unknown).
- pub scope: Option<InstrumentationScope>,
- /// A list of log records.
- #[serde(rename = "logRecords")]
- pub log_records: Vec<LogRecord>,
- /// This schema_url applies to all logs in the "logs" field.
- #[serde(rename = "schemaUrl")]
- pub schema_url: Option<String>,
- }
-
- #[derive(Serialize, Deserialize, Debug)]
- /// A log record according to OpenTelemetry Log Data Model:
- ///
- pub struct LogRecord {
- /// time_unix_nano is the time when the event occurred.
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- /// Value of 0 indicates unknown or missing timestamp.
- #[serde(rename = "timeUnixNano")]
- pub time_unix_nano: Option<String>,
- /// Time when the event was observed by the collection system.
- /// For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK)
- /// this timestamp is typically set at the generation time and is equal to Timestamp.
- /// For events originating externally and collected by OpenTelemetry (e.g. using
- /// Collector) this is the time when OpenTelemetry's code observed the event measured
- /// by the clock of the OpenTelemetry code. This field MUST be set once the event is
- /// observed by OpenTelemetry.
- ///
- /// For converting OpenTelemetry log data to formats that support only one timestamp or
- /// when receiving OpenTelemetry log data by recipients that support only one timestamp
- /// internally the following logic is recommended:
- /// - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- /// Value of 0 indicates unknown or missing timestamp.
- #[serde(rename = "observedTimeUnixNano")]
- pub observed_time_unix_nano: Option<String>,
- /// Numerical value of the severity, normalized to values described in Log Data Model.
- /// \[Optional\].
- #[serde(rename = "severityNumber")]
- pub severity_number: Option<i32>,
- /// The severity text (also known as log level). The original string representation as
- /// it is known at the source. \[Optional\].
- #[serde(rename = "severityText")]
- pub severity_text: Option<String>,
- /// A value containing the body of the log record. Can be for example a human-readable
- /// string message (including multi-line) describing the event in a free form or it can
- /// be a structured data composed of arrays and maps of other values. \[Optional\].
- pub body: Option<Value>,
- /// Additional attributes that describe the specific event occurrence. \[Optional\].
- /// Attribute keys MUST be unique (it is not allowed to have more than one
- /// attribute with the same key).
- pub attributes: Option<Vec<KeyValue>>,
- #[serde(rename = "droppedAttributesCount")]
- pub dropped_attributes_count: Option<u32>,
- /// Flags, a bit field. 8 least significant bits are the trace flags as
- /// defined in W3C Trace Context specification. 24 most significant bits are reserved
- /// and must be set to 0. Readers must not assume that 24 most significant bits
- /// will be zero and must correctly mask the bits when reading 8-bit trace flag (use
- /// flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). \[Optional\].
- pub flags: Option<u32>,
- /// A unique identifier for a trace. All logs from the same trace share
- /// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
- /// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
- /// is zero-length and thus is also invalid).
- ///
- /// This field is optional.
- ///
- /// The receivers SHOULD assume that the log record is not associated with a
- /// trace if any of the following is true:
- /// - the field is not present,
- /// - the field contains an invalid value.
- #[serde(rename = "traceId")]
- pub trace_id: Option<String>,
- /// A unique identifier for a span within a trace, assigned when the span
- /// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
- /// other than 8 bytes is considered invalid (empty string in OTLP/JSON
- /// is zero-length and thus is also invalid).
- ///
- /// This field is optional. If the sender specifies a valid span_id then it SHOULD also
- /// specify a valid trace_id.
- ///
- /// The receivers SHOULD assume that the log record is not associated with a
- /// span if any of the following is true:
- /// - the field is not present,
- /// - the field contains an invalid value.
- #[serde(rename = "spanId")]
- pub span_id: Option<String>,
- }
- /// Possible values for LogRecord.SeverityNumber.
- #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
- #[repr(i32)]
- pub enum SeverityNumber {
- /// UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.
- Unspecified = 0,
- Trace = 1,
- Trace2 = 2,
- Trace3 = 3,
- Trace4 = 4,
- Debug = 5,
- Debug2 = 6,
- Debug3 = 7,
- Debug4 = 8,
- Info = 9,
- Info2 = 10,
- Info3 = 11,
- Info4 = 12,
- Warn = 13,
- Warn2 = 14,
- Warn3 = 15,
- Warn4 = 16,
- Error = 17,
- Error2 = 18,
- Error3 = 19,
- Error4 = 20,
- Fatal = 21,
- Fatal2 = 22,
- Fatal3 = 23,
- Fatal4 = 24,
- }
- impl SeverityNumber {
- /// String value of the enum field names used in the ProtoBuf definition.
- ///
- /// The values are not transformed in any way and thus are considered stable
- /// (if the ProtoBuf definition does not change) and safe for programmatic use.
- pub fn as_str_name(severity_number: i32) -> &'static str {
- match severity_number {
- 0 => "SEVERITY_NUMBER_UNSPECIFIED",
- 1 => "SEVERITY_NUMBER_TRACE",
- 2 => "SEVERITY_NUMBER_TRACE2",
- 3 => "SEVERITY_NUMBER_TRACE3",
- 4 => "SEVERITY_NUMBER_TRACE4",
- 5 => "SEVERITY_NUMBER_DEBUG",
- 6 => "SEVERITY_NUMBER_DEBUG2",
- 7 => "SEVERITY_NUMBER_DEBUG3",
- 8 => "SEVERITY_NUMBER_DEBUG4",
- 9 => "SEVERITY_NUMBER_INFO",
- 10 => "SEVERITY_NUMBER_INFO2",
- 11 => "SEVERITY_NUMBER_INFO3",
- 12 => "SEVERITY_NUMBER_INFO4",
- 13 => "SEVERITY_NUMBER_WARN",
- 14 => "SEVERITY_NUMBER_WARN2",
- 15 => "SEVERITY_NUMBER_WARN3",
- 16 => "SEVERITY_NUMBER_WARN4",
- 17 => "SEVERITY_NUMBER_ERROR",
- 18 => "SEVERITY_NUMBER_ERROR2",
- 19 => "SEVERITY_NUMBER_ERROR3",
- 20 => "SEVERITY_NUMBER_ERROR4",
- 21 => "SEVERITY_NUMBER_FATAL",
- 22 => "SEVERITY_NUMBER_FATAL2",
- 23 => "SEVERITY_NUMBER_FATAL3",
- 24 => "SEVERITY_NUMBER_FATAL4",
- _ => "Invalid severity number",
- }
- }
- /// Creates an enum from field names used in the ProtoBuf definition.
- pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
- match value {
- "SEVERITY_NUMBER_UNSPECIFIED" => Some(Self::Unspecified),
- "SEVERITY_NUMBER_TRACE" => Some(Self::Trace),
- "SEVERITY_NUMBER_TRACE2" => Some(Self::Trace2),
- "SEVERITY_NUMBER_TRACE3" => Some(Self::Trace3),
- "SEVERITY_NUMBER_TRACE4" => Some(Self::Trace4),
- "SEVERITY_NUMBER_DEBUG" => Some(Self::Debug),
- "SEVERITY_NUMBER_DEBUG2" => Some(Self::Debug2),
- "SEVERITY_NUMBER_DEBUG3" => Some(Self::Debug3),
- "SEVERITY_NUMBER_DEBUG4" => Some(Self::Debug4),
- "SEVERITY_NUMBER_INFO" => Some(Self::Info),
- "SEVERITY_NUMBER_INFO2" => Some(Self::Info2),
- "SEVERITY_NUMBER_INFO3" => Some(Self::Info3),
- "SEVERITY_NUMBER_INFO4" => Some(Self::Info4),
- "SEVERITY_NUMBER_WARN" => Some(Self::Warn),
- "SEVERITY_NUMBER_WARN2" => Some(Self::Warn2),
- "SEVERITY_NUMBER_WARN3" => Some(Self::Warn3),
- "SEVERITY_NUMBER_WARN4" => Some(Self::Warn4),
- "SEVERITY_NUMBER_ERROR" => Some(Self::Error),
- "SEVERITY_NUMBER_ERROR2" => Some(Self::Error2),
- "SEVERITY_NUMBER_ERROR3" => Some(Self::Error3),
- "SEVERITY_NUMBER_ERROR4" => Some(Self::Error4),
- "SEVERITY_NUMBER_FATAL" => Some(Self::Fatal),
- "SEVERITY_NUMBER_FATAL2" => Some(Self::Fatal2),
- "SEVERITY_NUMBER_FATAL3" => Some(Self::Fatal3),
- "SEVERITY_NUMBER_FATAL4" => Some(Self::Fatal4),
- _ => None,
- }
- }
- }
- /// LogRecordFlags is defined as a protobuf 'uint32' type and is to be used as
- /// bit-fields. Each non-zero value defined in this enum is a bit-mask.
- /// To extract the bit-field, for example, use an expression like:
- ///
- /// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
- ///
- #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
- #[repr(i32)]
- pub enum LogRecordFlags {
- /// The zero value for the enum. Should not be used for comparisons.
- /// Instead use bitwise "and" with the appropriate mask as shown above.
- DoNotUse = 0,
- /// Bits 0-7 are used for trace flags.
- TraceFlagsMask = 255,
- }
- impl LogRecordFlags {
- /// String value of the enum field names used in the ProtoBuf definition.
- ///
- /// The values are not transformed in any way and thus are considered stable
- /// (if the ProtoBuf definition does not change) and safe for programmatic use.
- pub fn as_str_name(flag: u32) -> &'static str {
- match flag {
- 0 => "LOG_RECORD_FLAGS_DO_NOT_USE",
- 255 => "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK",
- _ => "Invalid flag",
- }
- }
- /// Creates an enum from field names used in the ProtoBuf definition.
- pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
- match value {
- "LOG_RECORD_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse),
- "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK" => Some(Self::TraceFlagsMask),
- _ => None,
- }
- }
- }
-
\ No newline at end of file
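
The hand-written `SeverityNumber` and `LogRecordFlags` helpers deleted here need no replacement: prost generates equivalent enums in `opentelemetry_proto::tonic::logs::v1`, with the conventional generated signatures (`as_str_name(&self)`, `from_str_name(&str) -> Option<Self>`, and `TryFrom<i32>` for the wire value). A sketch, assuming prost 0.13's generated API:

    use opentelemetry_proto::tonic::logs::v1::SeverityNumber;

    fn main() {
        // The wire value is an i32; unknown values fall back explicitly.
        let sev = SeverityNumber::try_from(9).unwrap_or(SeverityNumber::Unspecified);
        assert_eq!(sev.as_str_name(), "SEVERITY_NUMBER_INFO");
        assert_eq!(
            SeverityNumber::from_str_name("SEVERITY_NUMBER_INFO"),
            Some(SeverityNumber::Info)
        );
    }
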
diff --git a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.metrics.v1.rs b/src/handlers/http/otel/compiled_protos/opentelemetry.proto.metrics.v1.rs
deleted file mode 100644
index eb618f7cb..000000000
--- a/src/handlers/http/otel/compiled_protos/opentelemetry.proto.metrics.v1.rs
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Parseable Server (C) 2022 - 2024 Parseable, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as
- * published by the Free Software Foundation, either version 3 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- *
- */
-
-// This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation.
- // All the repeated fields were changed to Option<Vec<T>>.
-
-/// MetricsData represents the metrics data that can be stored in a persistent
-/// storage, OR can be embedded by other protocols that transfer OTLP metrics
-/// data but do not implement the OTLP protocol.
-///
-/// MetricsData
-/// └─── ResourceMetrics
-/// ├── Resource
-/// ├── SchemaURL
-/// └── ScopeMetrics
-/// ├── Scope
-/// ├── SchemaURL
-/// └── Metric
-/// ├── Name
-/// ├── Description
-/// ├── Unit
-/// └── data
-/// ├── Gauge
-/// ├── Sum
-/// ├── Histogram
-/// ├── ExponentialHistogram
-/// └── Summary
-///
-/// The main difference between this message and collector protocol is that
-/// in this message there will not be any "control" or "metadata" specific to
-/// OTLP protocol.
-///
-/// When new fields are added into this message, the OTLP request MUST be updated
-/// as well.
- use crate::handlers::http::otel::proto::common::v1::InstrumentationScope;
- use crate::handlers::http::otel::proto::common::v1::KeyValue;
- use crate::handlers::http::otel::proto::resource::v1::Resource;
- use serde::{Deserialize, Serialize};
-
- #[derive(Serialize, Deserialize, Debug)]
- #[serde(rename_all = "camelCase")]
-pub struct MetricsData {
- /// An array of ResourceMetrics.
- /// For data coming from a single resource this array will typically contain
- /// one element. Intermediary nodes that receive data from multiple origins
- /// typically batch the data before forwarding further and in that case this
- /// array will contain multiple elements.
- #[serde(rename = "resourceMetrics")]
- pub resource_metrics: Option<Vec<ResourceMetrics>>,
-}
-/// A collection of ScopeMetrics from a Resource.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct ResourceMetrics {
- /// The resource for the metrics in this message.
- /// If this field is not set then no resource info is known.
- pub resource: Option<Resource>,
- /// A list of metrics that originate from a resource.
- #[serde(rename = "scopeMetrics")]
- pub scope_metrics: Option<Vec<ScopeMetrics>>,
- /// The Schema URL, if known. This is the identifier of the Schema that the resource data
- /// is recorded in. Notably, the last part of the URL path is the version number of the
- /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see
- ///
- /// This schema_url applies to the data in the "resource" field. It does not apply
- /// to the data in the "scope_metrics" field which have their own schema_url field.
- #[serde(rename = "schemaUrl")]
- pub schema_url: Option<String>,
-}
-/// A collection of Metrics produced by an Scope.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct ScopeMetrics {
- /// The instrumentation scope information for the metrics in this message.
- /// Semantically when InstrumentationScope isn't set, it is equivalent with
- /// an empty instrumentation scope name (unknown).
- pub scope: Option<InstrumentationScope>,
- /// A list of metrics that originate from an instrumentation library.
- #[serde(rename = "metrics")]
- pub metrics: Vec<Metric>,
- /// The Schema URL, if known. This is the identifier of the Schema that the metric data
- /// is recorded in. Notably, the last part of the URL path is the version number of the
- /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see
- ///
- /// This schema_url applies to all metrics in the "metrics" field.
- #[serde(rename = "schemaUrl")]
- pub schema_url: Option<String>,
-}
-/// Defines a Metric which has one or more timeseries. The following is a
-/// brief summary of the Metric data model. For more details, see:
-///
-///
-///
-/// The data model and relation between entities is shown in the
-/// diagram below. Here, "DataPoint" is the term used to refer to any
-/// one of the specific data point value types, and "points" is the term used
-/// to refer to any one of the lists of points contained in the Metric.
-///
-/// - Metric is composed of a metadata and data.
-/// - Metadata part contains a name, description, unit.
-/// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
-/// - DataPoint contains timestamps, attributes, and one of the possible value type
-/// fields.
-///
-/// Metric
-/// +------------+
-/// |name |
-/// |description |
-/// |unit | +------------------------------------+
-/// |data |---> |Gauge, Sum, Histogram, Summary, ... |
-/// +------------+ +------------------------------------+
-///
-/// Data \[One of Gauge, Sum, Histogram, Summary, ...\]
-/// +-----------+
-/// |... | // Metadata about the Data.
-/// |points |--+
-/// +-----------+ |
-/// | +---------------------------+
-/// | |DataPoint 1 |
-/// v |+------+------+ +------+ |
-/// +-----+ ||label |label |...|label | |
-/// | 1 |-->||value1|value2|...|valueN| |
-/// +-----+ |+------+------+ +------+ |
-/// | . | |+-----+ |
-/// | . | ||value| |
-/// | . | |+-----+ |
-/// | . | +---------------------------+
-/// | . | .
-/// | . | .
-/// | . | .
-/// | . | +---------------------------+
-/// | . | |DataPoint M |
-/// +-----+ |+------+------+ +------+ |
-/// | M |-->||label |label |...|label | |
-/// +-----+ ||value1|value2|...|valueN| |
-/// |+------+------+ +------+ |
-/// |+-----+ |
-/// ||value| |
-/// |+-----+ |
-/// +---------------------------+
-///
-/// Each distinct type of DataPoint represents the output of a specific
-/// aggregation function, the result of applying the DataPoint's
-/// associated function of to one or more measurements.
-///
-/// All DataPoint types have three common fields:
-/// - Attributes includes key-value pairs associated with the data point
-/// - TimeUnixNano is required, set to the end time of the aggregation
-/// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
-/// having an AggregationTemporality field, as discussed below.
-///
-/// Both TimeUnixNano and StartTimeUnixNano values are expressed as
-/// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
-///
-/// # TimeUnixNano
-///
-/// This field is required, having consistent interpretation across
-/// DataPoint types. TimeUnixNano is the moment corresponding to when
-/// the data point's aggregate value was captured.
-///
-/// Data points with the 0 value for TimeUnixNano SHOULD be rejected
-/// by consumers.
-///
-/// # StartTimeUnixNano
-///
-/// StartTimeUnixNano in general allows detecting when a sequence of
-/// observations is unbroken. This field indicates to consumers the
-/// start time for points with cumulative and delta
-/// AggregationTemporality, and it should be included whenever possible
-/// to support correct rate calculation. Although it may be omitted
-/// when the start time is truly unknown, setting StartTimeUnixNano is
-/// strongly encouraged.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct Metric {
- /// name of the metric.
- pub name: Option<String>,
- /// description of the metric, which can be used in documentation.
- pub description: Option<String>,
- /// unit in which the metric value is reported. Follows the format
- /// described by
- pub unit: Option<String>,
- /// Additional metadata attributes that describe the metric. \[Optional\].
- /// Attributes are non-identifying.
- /// Consumers SHOULD NOT need to be aware of these attributes.
- /// These attributes MAY be used to encode information allowing
- /// for lossless roundtrip translation to / from another data model.
- /// Attribute keys MUST be unique (it is not allowed to have more than one
- /// attribute with the same key).
- pub metadata: Option<Vec<KeyValue>>,
- /// Data determines the aggregation type (if any) of the metric, what is the
- /// reported value type for the data points, as well as the relatationship to
- /// the time interval over which they are reported.
- pub gauge: Option<Gauge>,
- pub sum: Option<Sum>,
- pub histogram: Option<Histogram>,
- pub exponential_histogram: Option<ExponentialHistogram>,
- pub summary: Option<Summary>,
-}
-/// Gauge represents the type of a scalar metric that always exports the
-/// "current value" for every data point. It should be used for an "unknown"
-/// aggregation.
-///
-/// A Gauge does not support different aggregation temporalities. Given the
-/// aggregation is unknown, points cannot be combined using the same
-/// aggregation, regardless of aggregation temporalities. Therefore,
-/// AggregationTemporality is not included. Consequently, this also means
-/// "StartTimeUnixNano" is ignored for all data points.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct Gauge {
- pub data_points: Option<Vec<NumberDataPoint>>,
-}
-/// Sum represents the type of a scalar metric that is calculated as a sum of all
-/// reported measurements over a time interval.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct Sum {
- pub data_points: Option<Vec<NumberDataPoint>>,
- /// aggregation_temporality describes if the aggregator reports delta changes
- /// since last report time, or cumulative changes since a fixed start time.
- pub aggregation_temporality: Option<i32>,
- /// If "true" means that the sum is monotonic.
- pub is_monotonic: Option<bool>,
-}
-/// Histogram represents the type of a metric that is calculated by aggregating
-/// as a Histogram of all reported measurements over a time interval.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct Histogram {
-
- pub data_points: Option<Vec<HistogramDataPoint>>,
- /// aggregation_temporality describes if the aggregator reports delta changes
- /// since last report time, or cumulative changes since a fixed start time.
- pub aggregation_temporality: Option<i32>,
-}
-/// ExponentialHistogram represents the type of a metric that is calculated by aggregating
-/// as a ExponentialHistogram of all reported double measurements over a time interval.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct ExponentialHistogram {
- pub data_points: Option<Vec<ExponentialHistogramDataPoint>>,
- /// aggregation_temporality describes if the aggregator reports delta changes
- /// since last report time, or cumulative changes since a fixed start time.
- pub aggregation_temporality: Option<i32>,
-}
-/// Summary metric data are used to convey quantile summaries,
-/// a Prometheus (see: )
-/// and OpenMetrics (see: )
-/// data type. These data points cannot always be merged in a meaningful way.
-/// While they can be useful in some applications, histogram data points are
-/// recommended for new applications.
-/// Summary metrics do not have an aggregation temporality field. This is
-/// because the count and sum fields of a SummaryDataPoint are assumed to be
-/// cumulative values.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct Summary {
- pub data_points: Option<Vec<SummaryDataPoint>>,
-}
-/// NumberDataPoint is a single data point in a timeseries that describes the
-/// time-varying scalar value of a metric.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct NumberDataPoint {
- /// The set of key/value pairs that uniquely identify the timeseries from
- /// where this point belongs. The list may be empty (may contain 0 elements).
- /// Attribute keys MUST be unique (it is not allowed to have more than one
- /// attribute with the same key).
- pub attributes: Option<Vec<KeyValue>>,
- /// StartTimeUnixNano is optional but strongly encouraged, see the
- /// the detailed comments above Metric.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- /// 1970.
- pub start_time_unix_nano: Option<String>,
- /// TimeUnixNano is required, see the detailed comments above Metric.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- /// 1970.
- pub time_unix_nano: Option<String>,
- /// (Optional) List of exemplars collected from
- /// measurements that were used to form the data point
- pub exemplars: Option<Vec<Exemplar>>,
- /// Flags that apply to this specific data point. See DataPointFlags
- /// for the available flags and their meaning.
- pub flags: Option<u32>,
- /// The value itself. A point is considered invalid when one of the recognized
- /// value fields is not present inside this oneof.
- pub as_double: Option<f64>,
- pub as_int: Option<String>,
-}
-/// HistogramDataPoint is a single data point in a timeseries that describes the
-/// time-varying values of a Histogram. A Histogram contains summary statistics
-/// for a population of values, it may optionally contain the distribution of
-/// those values across a set of buckets.
-///
-/// If the histogram contains the distribution of values, then both
-/// "explicit_bounds" and "bucket counts" fields must be defined.
-/// If the histogram does not contain the distribution of values, then both
-/// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
-/// "sum" are known.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct HistogramDataPoint {
- /// The set of key/value pairs that uniquely identify the timeseries from
- /// where this point belongs. The list may be empty (may contain 0 elements).
- /// Attribute keys MUST be unique (it is not allowed to have more than one
- /// attribute with the same key).
- pub attributes: Option<Vec<KeyValue>>,
- /// StartTimeUnixNano is optional but strongly encouraged, see the
- /// the detailed comments above Metric.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- /// 1970.
- pub start_time_unix_nano: Option<String>,
- /// TimeUnixNano is required, see the detailed comments above Metric.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- /// 1970.
- pub time_unix_nano: Option<String>,
- /// count is the number of values in the population. Must be non-negative. This
- /// value must be equal to the sum of the "count" fields in buckets if a
- /// histogram is provided.
- pub count: Option<String>,
- /// sum of the values in the population. If count is zero then this field
- /// must be zero.
- ///
- /// Note: Sum should only be filled out when measuring non-negative discrete
- /// events, and is assumed to be monotonic over the values of these events.
- /// Negative events *can* be recorded, but sum should not be filled out when
- /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- /// see:
- pub sum: Option<f64>,
- /// bucket_counts is an optional field contains the count values of histogram
- /// for each bucket.
- ///
- /// The sum of the bucket_counts must equal the value in the count field.
- ///
- /// The number of elements in bucket_counts array must be by one greater than
- /// the number of elements in explicit_bounds array.
- pub bucket_counts: Option<Vec<String>>,
- /// explicit_bounds specifies buckets with explicitly defined bounds for values.
- ///
- /// The boundaries for bucket at index i are:
- ///
- /// (-infinity, explicit_bounds\[i]\] for i == 0
- /// (explicit_bounds\[i-1\], explicit_bounds\[i]\] for 0 < i < size(explicit_bounds)
- /// (explicit_bounds\[i-1\], +infinity) for i == size(explicit_bounds)
- ///
- /// The values in the explicit_bounds array must be strictly increasing.
- ///
- /// Histogram buckets are inclusive of their upper boundary, except the last
- /// bucket where the boundary is at infinity. This format is intentionally
- /// compatible with the OpenMetrics histogram definition.
- pub explicit_bounds: Option<Vec<f64>>,
- /// (Optional) List of exemplars collected from
- /// measurements that were used to form the data point
- pub exemplars: Option<Vec<Exemplar>>,
- /// Flags that apply to this specific data point. See DataPointFlags
- /// for the available flags and their meaning.
- pub flags: Option<u32>,
- /// min is the minimum value over (start_time, end_time].
- pub min: Option<f64>,
- /// max is the maximum value over (start_time, end_time].
- pub max: Option<f64>,
-}
-/// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
-/// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains
-/// summary statistics for a population of values, it may optionally contain the
-/// distribution of those values across a set of buckets.
-///
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct ExponentialHistogramDataPoint {
- /// The set of key/value pairs that uniquely identify the timeseries from
- /// where this point belongs. The list may be empty (may contain 0 elements).
- /// Attribute keys MUST be unique (it is not allowed to have more than one
- /// attribute with the same key).
- pub attributes: Option<Vec<KeyValue>>,
- /// StartTimeUnixNano is optional but strongly encouraged, see the
- /// the detailed comments above Metric.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- /// 1970.
- pub start_time_unix_nano: Option<String>,
- /// TimeUnixNano is required, see the detailed comments above Metric.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- /// 1970.
- pub time_unix_nano: Option<String>,
- /// count is the number of values in the population. Must be
- /// non-negative. This value must be equal to the sum of the "bucket_counts"
- /// values in the positive and negative Buckets plus the "zero_count" field.
- pub count: Option<String>,
- /// sum of the values in the population. If count is zero then this field
- /// must be zero.
- ///
- /// Note: Sum should only be filled out when measuring non-negative discrete
- /// events, and is assumed to be monotonic over the values of these events.
- /// Negative events *can* be recorded, but sum should not be filled out when
- /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- /// see:
- pub sum: Option<f64>,
- /// scale describes the resolution of the histogram. Boundaries are
- /// located at powers of the base, where:
- ///
- /// base = (2^(2^-scale))
- ///
- /// The histogram bucket identified by `index`, a signed integer,
- /// contains values that are greater than (base^index) and
- /// less than or equal to (base^(index+1)).
- ///
- /// The positive and negative ranges of the histogram are expressed
- /// separately. Negative values are mapped by their absolute value
- /// into the negative range using the same scale as the positive range.
- ///
- /// scale is not restricted by the protocol, as the permissible
- /// values depend on the range of the data.
- pub scale: Option<i32>,
- /// zero_count is the count of values that are either exactly zero or
- /// within the region considered zero by the instrumentation at the
- /// tolerated degree of precision. This bucket stores values that
- /// cannot be expressed using the standard exponential formula as
- /// well as values that have been rounded to zero.
- ///
- /// Implementations MAY consider the zero bucket to have probability
- /// mass equal to (zero_count / count).
- pub zero_count: Option<String>,
- /// positive carries the positive range of exponential bucket counts.
- pub positive: Option<exponential_histogram_data_point::Buckets>,
- /// negative carries the negative range of exponential bucket counts.
- pub negative: Option<exponential_histogram_data_point::Buckets>,
- /// Flags that apply to this specific data point. See DataPointFlags
- /// for the available flags and their meaning.
- pub flags: Option<u32>,
- /// (Optional) List of exemplars collected from
- /// measurements that were used to form the data point
- pub exemplars: Option<Vec<Exemplar>>,
- /// min is the minimum value over (start_time, end_time].
- pub min: Option<f64>,
- /// max is the maximum value over (start_time, end_time].
- pub max: Option<f64>,
- /// ZeroThreshold may be optionally set to convey the width of the zero
- /// region. Where the zero region is defined as the closed interval
- /// \[-ZeroThreshold, ZeroThreshold\].
- /// When ZeroThreshold is 0, zero count bucket stores values that cannot be
- /// expressed using the standard exponential formula as well as values that
- /// have been rounded to zero.
- pub zero_threshold: Option<f64>,
-}
-/// Nested message and enum types in `ExponentialHistogramDataPoint`.
-pub mod exponential_histogram_data_point {
- use serde::{Deserialize, Serialize};
- /// Buckets are a set of bucket counts, encoded in a contiguous array
- /// of counts.
- #[derive(Serialize, Deserialize, Debug)]
- #[serde(rename_all = "camelCase")]
- pub struct Buckets {
- /// Offset is the bucket index of the first entry in the bucket_counts array.
- ///
- /// Note: This uses a varint encoding as a simple form of compression.
- pub offset: Option<i32>,
- /// bucket_counts is an array of count values, where bucket_counts\[i\] carries
- /// the count of the bucket at index (offset+i). bucket_counts\[i\] is the count
- /// of values greater than base^(offset+i) and less than or equal to
- /// base^(offset+i+1).
- ///
- /// Note: By contrast, the explicit HistogramDataPoint uses
- /// fixed64. This field is expected to have many buckets,
- /// especially zeros, so uint64 has been selected to ensure
- /// varint encoding.
- pub bucket_counts: Option<Vec<String>>,
- }
-}
-/// SummaryDataPoint is a single data point in a timeseries that describes the
-/// time-varying values of a Summary metric. The count and sum fields represent
-/// cumulative values.
-#[derive(Serialize, Deserialize, Debug)]
-#[serde(rename_all = "camelCase")]
-pub struct SummaryDataPoint {
- /// The set of key/value pairs that uniquely identify the timeseries from
- /// where this point belongs. The list may be empty (may contain 0 elements).
- /// Attribute keys MUST be unique (it is not allowed to have more than one
- /// attribute with the same key).
- pub attributes: Option<Vec<KeyValue>>,
- /// StartTimeUnixNano is optional but strongly encouraged, see the
- /// the detailed comments above Metric.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- /// 1970.
- pub start_time_unix_nano: Option<String>,
- /// TimeUnixNano is required, see the detailed comments above Metric.
- ///
- /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- /// 1970.
- pub time_unix_nano: Option<String>,
- /// count is the number of values in the population. Must be non-negative.
- pub count: Option<String>,
- /// sum of the values in the population. If count is zero then this field
- /// must be zero.
- ///
- /// Note: Sum should only be filled out when measuring non-negative discrete
- /// events, and is assumed to be monotonic over the values of these events.
- /// Negative events *can* be recorded, but sum should not be filled out when
- /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- /// see:
- pub sum: Option<f64>,
- /// (Optional) list of values at different quantiles of the distribution calculated
- /// from the current snapshot. The quantiles must be strictly increasing.
- pub quantile_values: Option<Vec<summary_data_point::ValueAtQuantile>>,
- /// Flags that apply to this specific data point. See DataPointFlags
- /// for the available flags and their meaning.
- pub flags: Option<u32>,
-}
-/// Nested message and enum types in `SummaryDataPoint`.
-pub mod summary_data_point {
- use serde::{Deserialize, Deserializer, Serialize};
- /// Represents the value at a given quantile of a distribution.
- ///
- /// To record Min and Max values following conventions are used:
- /// - The 1.0 quantile is equivalent to the maximum value observed.
- /// - The 0.0 quantile is equivalent to the minimum value observed.
- ///
- /// See the following issue for more context:
- ///
- #[derive(Serialize, Deserialize, Debug)]
- #[serde(rename_all = "camelCase")]
- pub struct ValueAtQuantile {
- /// The quantile of a distribution. Must be in the interval
- /// \[0.0, 1.0\].
- pub quantile: Option<f64>,
- /// The value at the given quantile of a distribution.
- ///
- /// Quantile values must NOT be negative.
- #[serde(deserialize_with = "deserialize_f64_or_nan")]
- pub value: Option<f64>,
- }
-
- fn deserialize_f64_or_nan<'de, D>(deserializer: D) -> Result