diff --git a/src/alerts/mod.rs b/src/alerts/mod.rs
index 9523e5e1f..8fce4bcec 100644
--- a/src/alerts/mod.rs
+++ b/src/alerts/mod.rs
@@ -136,7 +136,7 @@ pub struct Message {
impl Message {
// checks if message (with a column name) is valid (i.e. the column name is present in the schema)
pub fn valid(&self, schema: &Schema, column: &str) -> bool {
- return get_field(&schema.fields, column).is_some();
+ get_field(&schema.fields, column).is_some()
}
pub fn extract_column_names(&self) -> Vec<&str> {
diff --git a/src/handlers/http/modal/utils/logstream_utils.rs b/src/handlers/http/modal/utils/logstream_utils.rs
index 0081a258a..caa111e6a 100644
--- a/src/handlers/http/modal/utils/logstream_utils.rs
+++ b/src/handlers/http/modal/utils/logstream_utils.rs
@@ -148,7 +148,8 @@ async fn update_stream(
return Ok(req.headers().clone());
}
validate_and_update_custom_partition(stream_name, custom_partition).await?;
- return Ok(req.headers().clone());
+
+ Ok(req.headers().clone())
}
async fn validate_and_update_custom_partition(
diff --git a/src/handlers/http/otel/opentelemetry.proto.resource.v1.rs b/src/handlers/http/otel/opentelemetry.proto.resource.v1.rs
index 51f86481a..2f102628a 100644
--- a/src/handlers/http/otel/opentelemetry.proto.resource.v1.rs
+++ b/src/handlers/http/otel/opentelemetry.proto.resource.v1.rs
@@ -15,24 +15,22 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
- // This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation.
- // All the repeated fields were changed to Option<Vec<T>>
+// This file was generated by protoc-gen-rust-protobuf. The file was edited after the generation.
+// All the repeated fields were changed to Option<Vec<T>>
- use crate::handlers::http::otel::proto::common::v1::KeyValue;
- use serde::{Deserialize, Serialize};
-
- #[derive(Serialize, Deserialize, Debug)]
- /// Resource information.
- pub struct Resource {
- /// Set of attributes that describe the resource.
- /// Attribute keys MUST be unique (it is not allowed to have more than one
- /// attribute with the same key).
- #[serde(rename = "attributes")]
- pub attributes: Option<Vec<KeyValue>>,
- /// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
- /// no attributes were dropped.
-
- #[serde(rename = "droppedAttributesCount")]
- pub dropped_attributes_count: Option<u32>,
- }
-
\ No newline at end of file
+use crate::handlers::http::otel::proto::common::v1::KeyValue;
+use serde::{Deserialize, Serialize};
+
+#[derive(Serialize, Deserialize, Debug)]
+/// Resource information.
+pub struct Resource {
+ /// Set of attributes that describe the resource.
+ /// Attribute keys MUST be unique (it is not allowed to have more than one
+ /// attribute with the same key).
+ #[serde(rename = "attributes")]
+ pub attributes: Option<Vec<KeyValue>>,
+ /// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+ /// no attributes were dropped.
+ #[serde(rename = "droppedAttributesCount")]
+ pub dropped_attributes_count: Option<u32>,
+}
diff --git a/src/utils/arrow/mod.rs b/src/utils/arrow/mod.rs
index 87af65735..b3105eeee 100644
--- a/src/utils/arrow/mod.rs
+++ b/src/utils/arrow/mod.rs
@@ -17,6 +17,29 @@
*
*/
+//! example function for concat recordbatch(may not work)
+//! ```rust
+//! # use arrow::record_batch::RecordBatch;
+//! # use arrow::error::Result;
+//!
+//! fn concat_batches(batch1: RecordBatch, batch2: RecordBatch) -> Result<RecordBatch> {
+//! let schema = batch1.schema();
+//! let columns = schema
+//! .fields()
+//! .iter()
+//! .enumerate()
+//! .map(|(i, _)| -> Result<_> {
+//! let array1 = batch1.column(i);
+//! let array2 = batch2.column(i);
+//! let array = arrow::compute::concat(&[array1.as_ref(), array2.as_ref()])?;
+//! Ok(array)
+//! })
+//! .collect::<Result<Vec<_>>>()?;
+//!
+//! RecordBatch::try_new(schema.clone(), columns)
+//! }
+//! ```
+
use std::sync::Arc;
use arrow_array::{Array, RecordBatch};
@@ -33,30 +56,6 @@ pub use batch_adapter::adapt_batch;
pub use merged_reader::MergedRecordReader;
use serde_json::{Map, Value};
-/// example function for concat recordbatch(may not work)
-/// ```rust
-/// # use arrow::record_batch::RecordBatch;
-/// # use arrow::error::Result;
-///
-/// fn concat_batches(batch1: RecordBatch, batch2: RecordBatch) -> Result<RecordBatch> {
-/// let schema = batch1.schema();
-/// let columns = schema
-/// .fields()
-/// .iter()
-/// .enumerate()
-/// .map(|(i, _)| -> Result<_> {
-/// let array1 = batch1.column(i);
-/// let array2 = batch2.column(i);
-/// let array = arrow::compute::concat(&[array1.as_ref(), array2.as_ref()])?;
-/// Ok(array)
-/// })
-/// .collect::<Result<Vec<_>>>()?;
-///
-/// RecordBatch::try_new(schema.clone(), columns)
-/// }
-/// ```
-///
-
/// Replaces columns in a record batch with new arrays.
///
/// # Arguments