From 3fce3064955535c39371c7048e981bf79e97c52e Mon Sep 17 00:00:00 2001 From: Guoteng Rao <3603304+grao1991@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:16:38 +0000 Subject: [PATCH] big change --- Cargo.lock | 85 +- Cargo.toml | 8 +- .../indexer-grpc-data-service/Cargo.toml | 2 + .../indexer-grpc-data-service/src/config.rs | 64 +- .../src/data_service.rs | 461 ++++++ .../indexer-grpc-data-service/src/lib.rs | 1 + .../indexer-grpc-data-service/src/metrics.rs | 162 -- .../src/old/config.rs | 244 +++ .../indexer-grpc-data-service/src/old/lib.rs | 8 + .../indexer-grpc-data-service/src/old/main.rs | 17 + .../src/old/metrics.rs | 164 ++ .../src/old/service.rs | 1319 +++++++++++++++++ .../indexer-grpc-data-service/src/service.rs | 1220 +-------------- .../indexer-grpc-file-store/Cargo.toml | 2 + .../src/data_manager.rs | 48 + .../{processor.rs => file_store_uploader.rs} | 110 +- .../indexer-grpc-file-store/src/lib.rs | 113 +- .../indexer-grpc-file-store/src/main.rs | 2 +- .../src/metadata_manager.rs | 57 + .../indexer-grpc-utils/src/cache_operator.rs | 6 +- .../src/compression_util.rs | 5 +- protos/proto/aptos/indexer/v1/grpc.proto | 82 + protos/rust/src/pb/aptos.indexer.v1.rs | 396 ++++- protos/rust/src/pb/aptos.indexer.v1.serde.rs | 1234 +++++++++++++++ protos/rust/src/pb/aptos.indexer.v1.tonic.rs | 1254 +++++++++++++++- .../rust/src/pb/aptos.internal.fullnode.v1.rs | 18 +- .../pb/aptos.internal.fullnode.v1.tonic.rs | 90 +- .../rust/src/pb/aptos.remote_executor.v1.rs | 5 +- .../src/pb/aptos.remote_executor.v1.tonic.rs | 87 +- protos/rust/src/pb/aptos.transaction.v1.rs | 276 ++-- protos/rust/src/pb/aptos.util.timestamp.rs | 4 +- .../typescript/src/index.aptos.indexer.v1.ts | 1 + 32 files changed, 5637 insertions(+), 1908 deletions(-) create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/data_service.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/config.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/lib.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/main.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/metrics.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/service.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-store/src/data_manager.rs rename ecosystem/indexer-grpc/indexer-grpc-file-store/src/{processor.rs => file_store_uploader.rs} (66%) create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-store/src/metadata_manager.rs create mode 100644 protos/proto/aptos/indexer/v1/grpc.proto diff --git a/Cargo.lock b/Cargo.lock index 9662a018635ae..845738c46529a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -362,7 +362,7 @@ dependencies = [ "thiserror", "tokio", "toml 0.7.8", - "tonic 0.11.0", + "tonic 0.12.3", "tracing", "tracing-subscriber 0.3.18", "url", @@ -2110,13 +2110,13 @@ dependencies = [ "futures-core", "jemallocator", "once_cell", - "prost 0.12.3", + "prost 0.13.3", "redis", "reqwest 0.11.23", "serde", "tempfile", "tokio", - "tonic 0.11.0", + "tonic 0.12.3", "tracing", "url", ] @@ -2136,14 +2136,16 @@ dependencies = [ "clap 4.4.14", "futures", "jemallocator", + "lazy_static", "once_cell", - "prost 0.12.3", + "prost 0.13.3", "redis", "serde", "serde_json", "tokio", + "tokio-scoped", "tokio-stream", - "tonic 0.11.0", + "tonic 0.12.3", "tonic-reflection", "tracing", "uuid", @@ -2177,6 +2179,7 @@ dependencies = [ "aptos-indexer-grpc-utils", "aptos-metrics-core", "aptos-moving-average 0.1.0 
(git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=4801acae7aea30d7e96bbfbe5ec5b04056dfa4cf)", + "aptos-protos 1.3.1", "async-trait", "clap 4.4.14", "futures", @@ -2185,6 +2188,7 @@ dependencies = [ "redis", "serde", "tokio", + "tonic 0.12.3", "tracing", ] @@ -2203,7 +2207,7 @@ dependencies = [ "serde", "serde_json", "tokio", - "tonic 0.11.0", + "tonic 0.12.3", "tracing", "url", ] @@ -2259,7 +2263,7 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tonic 0.11.0", + "tonic 0.12.3", "tonic-reflection", ] @@ -2349,7 +2353,7 @@ dependencies = [ "lz4", "once_cell", "prometheus", - "prost 0.12.3", + "prost 0.13.3", "redis", "redis-test", "ripemd", @@ -2357,7 +2361,7 @@ dependencies = [ "serde_json", "tokio", "tokio-util 0.7.10", - "tonic 0.11.0", + "tonic 0.12.3", "tracing", "url", ] @@ -2423,7 +2427,7 @@ dependencies = [ "tokio", "tokio-stream", "toml 0.7.8", - "tonic 0.11.0", + "tonic 0.12.3", "url", ] @@ -3476,9 +3480,9 @@ version = "1.3.1" dependencies = [ "futures-core", "pbjson", - "prost 0.12.3", + "prost 0.13.3", "serde", - "tonic 0.11.0", + "tonic 0.12.3", ] [[package]] @@ -3831,7 +3835,7 @@ dependencies = [ "serde", "thiserror", "tokio", - "tonic 0.11.0", + "tonic 0.12.3", "tonic-reflection", ] @@ -4299,7 +4303,7 @@ dependencies = [ "aptos-protos 1.3.1", "derive_builder", "lz4", - "prost 0.12.3", + "prost 0.13.3", "serde", "serde_json", "serde_yaml 0.8.26", @@ -8574,14 +8578,14 @@ dependencies = [ "hyper 1.4.1", "jsonwebtoken 9.3.0", "once_cell", - "prost 0.13.1", + "prost 0.13.3", "prost-types 0.13.1", "reqwest 0.12.5", "secret-vault-value", "serde", "serde_json", "tokio", - "tonic 0.12.1", + "tonic 0.12.3", "tower", "tower-layer", "tower-util", @@ -13780,12 +13784,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", - "prost-derive 0.13.1", + "prost-derive 0.13.3", ] [[package]] @@ -13816,9 +13820,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", "itertools 0.13.0", @@ -13851,7 +13855,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" dependencies = [ - "prost 0.13.1", + "prost 0.13.3", ] [[package]] @@ -14855,6 +14859,19 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.1", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "0.2.1" @@ -16728,9 +16745,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = 
"4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -16925,15 +16942,16 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", "axum 0.7.5", "base64 0.22.1", "bytes", + "flate2", "h2 0.4.5", "http 1.1.0", "http-body 1.0.0", @@ -16943,8 +16961,8 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project 1.1.3", - "prost 0.13.1", - "rustls-native-certs 0.7.0", + "prost 0.13.3", + "rustls-native-certs 0.8.0", "rustls-pemfile 2.1.1", "socket2 0.5.5", "tokio", @@ -16954,19 +16972,20 @@ dependencies = [ "tower-layer", "tower-service", "tracing", + "zstd", ] [[package]] name = "tonic-reflection" -version = "0.11.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7" +checksum = "878d81f52e7fcfd80026b7fdb6a9b578b3c3653ba987f87f0dce4b64043cba27" dependencies = [ - "prost 0.12.3", - "prost-types 0.12.3", + "prost 0.13.3", + "prost-types 0.13.1", "tokio", "tokio-stream", - "tonic 0.11.0", + "tonic 0.12.3", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c9b687d313c27..430f004c6a451 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -709,8 +709,8 @@ prometheus-http-query = "0.5.2" prometheus-parse = "0.2.4" proptest = "1.4.0" proptest-derive = "0.4.0" -prost = { version = "0.12.3", features = ["no-recursion-limit"] } -prost-types = "0.12.3" +prost = { version = "0.13.3", features = ["no-recursion-limit"] } +prost-types = "0.13.3" quanta = "0.10.1" quick_cache = "0.5.1" quick-junit = "0.5.0" @@ -804,7 +804,7 @@ tokio-stream = { version = "0.1.14", features = ["fs"] } tokio-test = "0.4.1" tokio-util = { version = "0.7.2", features = ["compat", "codec"] } toml = "0.7.4" -tonic = { version = "0.11.0", features = [ +tonic = { version = "0.12.3", features = [ "tls-roots", "transport", "prost", @@ -812,7 +812,7 @@ tonic = { version = "0.11.0", features = [ "codegen", "zstd", ] } -tonic-reflection = "0.11.0" +tonic-reflection = "0.12.3" topological-sort = "0.2.2" triomphe = "0.1.9" tsify-next = "0.5.4" diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-data-service/Cargo.toml index ae70ba89314fe..f278a4088b20e 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/Cargo.toml @@ -23,12 +23,14 @@ aptos-transaction-filter = { workspace = true } async-trait = { workspace = true } clap = { workspace = true } futures = { workspace = true } +lazy_static = { workspace = true } once_cell = { workspace = true } prost = { workspace = true } redis = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } +tokio-scoped = { workspace = true } tokio-stream = { workspace = true } tonic = { workspace = true } tonic-reflection = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs index c5f621fcde703..ddd6b450c08fb 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs +++ 
b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs @@ -1,12 +1,12 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::service::RawDataServerWrapper; +use crate::{data_service::DataService, service::RawDataServerWrapper}; use anyhow::{bail, Result}; use aptos_indexer_grpc_server_framework::RunnableConfig; use aptos_indexer_grpc_utils::{ - compression_util::StorageFormat, config::IndexerGrpcFileStoreConfig, - in_memory_cache::InMemoryCacheConfig, types::RedisUrl, + cache_operator::CacheOperator, compression_util::StorageFormat, + config::IndexerGrpcFileStoreConfig, in_memory_cache::InMemoryCacheConfig, types::RedisUrl, }; use aptos_protos::{ indexer::v1::FILE_DESCRIPTOR_SET as INDEXER_V1_FILE_DESCRIPTOR_SET, @@ -14,8 +14,9 @@ use aptos_protos::{ util::timestamp::FILE_DESCRIPTOR_SET as UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET, }; use aptos_transaction_filter::BooleanTransactionFilter; +use once_cell::sync::{Lazy, OnceCell}; use serde::{Deserialize, Serialize}; -use std::{net::SocketAddr, sync::Arc}; +use std::net::SocketAddr; use tonic::{codec::CompressionEncoding, transport::Server}; pub const SERVER_NAME: &str = "idxdatasvc"; @@ -29,6 +30,8 @@ const DEFAULT_MAX_RESPONSE_CHANNEL_SIZE: usize = 3; const HTTP2_PING_INTERVAL_DURATION: std::time::Duration = std::time::Duration::from_secs(60); const HTTP2_PING_TIMEOUT_DURATION: std::time::Duration = std::time::Duration::from_secs(10); +static DATA_SERVICE: OnceCell<DataService<'static>> = OnceCell::new(); + #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct TlsConfig { @@ -65,9 +68,6 @@ pub struct IndexerGrpcDataServiceConfig { pub file_store_config: IndexerGrpcFileStoreConfig, /// Redis read replica address. pub redis_read_replica_address: RedisUrl, - /// Support compressed cache data. - #[serde(default = "IndexerGrpcDataServiceConfig::default_enable_cache_compression")] - pub enable_cache_compression: bool, #[serde(default)] pub in_memory_cache_config: InMemoryCacheConfig, /// Any transaction that matches this filter will be stripped. This means we remove @@ -105,7 +105,6 @@ impl IndexerGrpcDataServiceConfig { disable_auth_check, file_store_config, redis_read_replica_address, - enable_cache_compression, in_memory_cache_config, txns_to_strip_filter, } @@ -153,43 +152,14 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { .accept_compressed(CompressionEncoding::Zstd) .accept_compressed(CompressionEncoding::Gzip); - let cache_storage_format: StorageFormat = if self.enable_cache_compression { - StorageFormat::Lz4CompressedProto - } else { - StorageFormat::Base64UncompressedProto - }; - - println!( - ">>>> Starting Redis connection: {:?}", - &self.redis_read_replica_address.0 - ); - let redis_conn = redis::Client::open(self.redis_read_replica_address.0.clone())? - .get_tokio_connection_manager() - .await?; - println!(">>>> Redis connection established"); - // InMemoryCache. - let in_memory_cache = - aptos_indexer_grpc_utils::in_memory_cache::InMemoryCache::new_with_redis_connection( - self.in_memory_cache_config.clone(), - redis_conn, - cache_storage_format, - ) - .await?; - println!(">>>> InMemoryCache established"); + let (handler_tx, handler_rx) = tokio::sync::mpsc::channel(100); // Add authentication interceptor. - let server = RawDataServerWrapper::new( - self.redis_read_replica_address.clone(), - self.file_store_config.clone(), - self.data_service_response_channel_size, - self.txns_to_strip_filter.clone(), - cache_storage_format, - Arc::new(in_memory_cache), - )?; - let svc = aptos_protos::indexer::v1::raw_data_server::RawDataServer::new(server) + let raw_data_server = + RawDataServerWrapper::new(handler_tx, self.data_service_response_channel_size)?; + let svc = aptos_protos::indexer::v1::raw_data_server::RawDataServer::new(raw_data_server) .send_compressed(CompressionEncoding::Zstd) .accept_compressed(CompressionEncoding::Zstd) .accept_compressed(CompressionEncoding::Gzip); - println!(">>>> Starting gRPC server: {:?}", &svc); let svc_clone = svc.clone(); let reflection_service_clone = reflection_service.clone(); @@ -234,6 +204,18 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { })); } + let redis_client = redis::Client::open(self.redis_read_replica_address.0.clone()).unwrap(); + let conn = + futures::executor::block_on(redis_client.get_tokio_connection_manager()).unwrap(); + let cache_operator = CacheOperator::new(conn, StorageFormat::Lz4CompressedProto); + + tasks.push(tokio::task::spawn_blocking(move || { + DATA_SERVICE + .get_or_init(|| DataService::new(cache_operator)) + .run(handler_rx); + Ok(()) + })); + futures::future::try_join_all(tasks).await?; Ok(()) }
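Worth noting for reviewers: the config change above wires the tonic handler to the new `DataService` through an mpsc channel whose receiving end is drained on a dedicated blocking thread (`spawn_blocking` plus `blocking_recv`). A minimal, self-contained sketch of that pattern, with toy `u64` messages standing in for the real request/response types:

```rust
// Sketch of the handler-channel pattern used above (hypothetical toy types):
// the async gRPC handler sends (request, response_sender) pairs into an mpsc
// channel, and a dedicated blocking thread drains it with `blocking_recv`.
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (handler_tx, mut handler_rx) = mpsc::channel::<(u64, mpsc::Sender<u64>)>(100);

    // Stand-in for DataService::run: runs on a blocking thread, never yields.
    let worker = tokio::task::spawn_blocking(move || {
        while let Some((request, response_tx)) = handler_rx.blocking_recv() {
            // Serve the request; blocking_send applies backpressure if the
            // async side is slow to drain its response channel.
            let _ = response_tx.blocking_send(request + 1);
        }
    });

    // Stand-in for the tonic handler: forwards the request and awaits replies.
    let (response_tx, mut response_rx) = mpsc::channel(10);
    handler_tx.send((41, response_tx)).await.unwrap();
    assert_eq!(response_rx.recv().await, Some(42));

    drop(handler_tx); // Closing the channel lets the worker exit.
    worker.await.unwrap();
}
```

The blocking side never ties up an async runtime worker, and the bounded channels give natural backpressure against slow clients.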
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/data_service.rs new file mode 100644 index 0000000000000..850e1e39a5d06 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/data_service.rs @@ -0,0 +1,461 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_indexer_grpc_utils::{ + cache_operator::CacheOperator, compression_util::StorageFormat, types::RedisUrl, +}; +use aptos_protos::{ + indexer::v1::{GetTransactionsRequest, TransactionsResponse}, + transaction::v1::Transaction, +}; +use futures::future::{BoxFuture, FutureExt, Shared}; +use prost::Message; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; +use tokio::sync::{ + mpsc::{Receiver, Sender}, + RwLock, +}; +use tonic::Status; +use tracing::{error, info}; + +pub static NUM_SLOTS: usize = 200000000; +pub static SIZE_LIMIT: usize = 10000000000; +pub static DEFAULT_MAX_BATCH_SIZE: usize = 10000; + +type FetchKey = u64; + +struct DataClient { + cache_operator: CacheOperator<redis::aio::ConnectionManager>, +} + +impl DataClient { + fn new(cache_operator: CacheOperator<redis::aio::ConnectionManager>) -> Self { + Self { cache_operator } + } + + async fn fetch_transactions( + &self, + starting_version: u64, + num_transactions: usize, + ) -> Vec<Transaction> { + let res = self + .cache_operator + .clone() + .get_transactions(starting_version, num_transactions as u64) + .await + .unwrap(); + res + } +} + +type FetchTask<'a> = Shared<BoxFuture<'a, usize>>; + +struct FetchManager<'a> { + data_manager: Arc<RwLock<DataManager>>, + data_client: Arc<DataClient>, + pending_fetches: RwLock<HashMap<FetchKey, FetchTask<'a>>>, + fetching_latest_data_task: RwLock<Option<FetchTask<'a>>>, +} + +impl<'a> FetchManager<'a> { + fn new( + data_manager: Arc<RwLock<DataManager>>, + cache_operator: CacheOperator<redis::aio::ConnectionManager>, + ) -> Self { + Self { + data_manager, + data_client: Arc::new(DataClient::new(cache_operator)), + pending_fetches: RwLock::new(HashMap::new()), + fetching_latest_data_task: RwLock::new(None), + } + } + + async fn fetch_past_data(&'a self, version: u64) -> FetchTask<'a> { + let fetch_key = version / 100 * 100; + if let Some(fetch_task) = self.pending_fetches.read().await.get(&fetch_key) { + return fetch_task.clone(); + } + + let fetch_task = Self::fetch_and_update_cache( + self.data_client.clone(), + self.data_manager.clone(), + fetch_key, + 100, + ) + .boxed() + .shared(); + self.pending_fetches + .write() + .await + .insert(fetch_key, fetch_task.clone()); + + fetch_task + } + + async fn fetch_and_update_cache( + data_client: Arc<DataClient>, + data_manager: Arc<RwLock<DataManager>>, + version: u64, + num_transactions: usize, + ) -> usize { + let transactions = data_client + .fetch_transactions(version, num_transactions) + .await; + let len = transactions.len(); + + if len > 0 { + data_manager + .write() + .await + .update_data(version, transactions); + } + + len + } + + async fn fetch_latest_data(&'a self) -> usize { + let version = self.data_manager.read().await.end_version; + info!("Fetching latest data starting from version {version}."); + loop { + let num_transactions = Self::fetch_and_update_cache( + self.data_client.clone(), + self.data_manager.clone(), + version, + 100, + ) + .await; + if num_transactions != 0 { + info!("Finished fetching latest data, got {num_transactions} transactions starting from version {version}."); + return num_transactions; + } + tokio::time::sleep(Duration::from_millis(200)).await; + } + } + + async fn continuously_fetch_latest_data(&'a self) { + loop { + let task = self.fetch_latest_data().boxed().shared(); + *self.fetching_latest_data_task.write().await = Some(task.clone()); + let _ = task.await; + } + } +} + +struct DataManager { + start_version: u64, + end_version: u64, + data: Vec<Option<Box<Transaction>>>, + + soft_limit_for_eviction: usize, + eviction_target: usize, + total_size: usize, + num_slots: usize, +} + +impl DataManager { + fn new(end_version: u64, num_slots: usize, size_limit_bytes: usize) -> Self { + Self { + start_version: end_version.saturating_sub(num_slots as u64), + end_version, + data: vec![None; num_slots], + soft_limit_for_eviction: size_limit_bytes, + eviction_target: size_limit_bytes, + total_size: 0, + num_slots, + } + } + + fn update_data(&mut self, start_version: u64, transactions: Vec<Transaction>) { + if start_version > self.end_version { + // TODO(grao): unexpected + return; + } + + let end_version = start_version + transactions.len() as u64; + if end_version <= self.start_version { + return; + } + + let num_to_skip = self.start_version.saturating_sub(start_version); + let start_version = start_version.max(self.start_version); + + let mut size_increased = 0; + let mut size_decreased = 0; + + // Skip before enumerating so that `i` is relative to the (possibly + // advanced) start_version; otherwise the computed versions would be + // offset by num_to_skip. + for (i, transaction) in transactions + .into_iter() + .skip(num_to_skip as usize) + .enumerate() + { + let version = start_version + i as u64; + let slot_index = version as usize % self.num_slots; + if let Some(transaction) = self.data[slot_index].take() { + size_decreased += transaction.encoded_len(); + } + size_increased += transaction.encoded_len(); + self.data[slot_index] = Some(Box::new(transaction)); + } + + if end_version > self.end_version { + self.end_version = end_version; + if self.start_version + (self.num_slots as u64) < end_version { + self.start_version = end_version - self.num_slots as u64; + } + } + + self.total_size += size_increased; + self.total_size -= size_decreased; + + if self.total_size >= self.soft_limit_for_eviction { + while self.total_size >= self.eviction_target { + if let Some(transaction) = + self.data[self.start_version as usize % self.num_slots].take() + { + self.total_size -= transaction.encoded_len(); + drop(transaction); + } + self.start_version += 1; + } + } + } +} + +pub struct InMemoryCache<'a> { + data_manager: Arc<RwLock<DataManager>>, + fetch_manager: Arc<FetchManager<'a>>, +} + +impl<'a> InMemoryCache<'a> { + pub fn new( + cache_operator: CacheOperator<redis::aio::ConnectionManager>, + known_latest_version: u64, + num_slots: usize, + size_limit_bytes: usize, + ) -> Self { + let data_manager = Arc::new(RwLock::new(DataManager::new( + known_latest_version + 1, + num_slots, + size_limit_bytes, + ))); + let fetch_manager = Arc::new(FetchManager::new(data_manager.clone(), cache_operator)); + Self { + data_manager, + fetch_manager, + } + } + + async fn get_data( + &'a self, + starting_version: u64, + ending_version: u64, + max_batch_size: usize, + ) -> Option<Vec<Transaction>> { + while starting_version >= self.data_manager.read().await.end_version { + info!("Reached head, wait..."); + let num_transactions = self + .fetch_manager + .fetching_latest_data_task + .read() + .await + .as_ref() + .unwrap() + .clone() + .await; + + info!("Done waiting, {num_transactions}"); + } + + loop { + let data_manager = self.data_manager.read().await; + + if starting_version < data_manager.start_version { + info!( + "requested_version: {starting_version}, oldest available version: {}", + data_manager.start_version + ); + return None; + } + + let start_index = starting_version as usize % data_manager.num_slots; + + if data_manager.data[start_index].is_none() { + drop(data_manager); + self.fetch_manager + .fetch_past_data(starting_version) + .await + .await; + continue; + } + + let mut version = starting_version; + let ending_version = ending_version.min(data_manager.end_version); + + if data_manager.data[version as usize % data_manager.num_slots].is_some() { + let mut result = Vec::new(); + // Stop at the cache head, at a gap, or once max_batch_size + // transactions have been collected. + while version < ending_version && result.len() < max_batch_size { + if let Some(transaction) = + data_manager.data[version as usize % data_manager.num_slots].as_ref() + { + result.push(transaction.as_ref().clone()); + version += 1; + } else { + break; + } + } + info!("version {} is sent", version - 1); + return Some(result); + } else { + unreachable!("Data cannot be None."); + } + } + } +} + +pub struct DataService<'a> { + cache_operator: CacheOperator<redis::aio::ConnectionManager>, + in_memory_cache: InMemoryCache<'a>, + known_latest_version: AtomicU64, +} + +impl<'a> DataService<'a> { + pub fn new(mut cache_operator: CacheOperator<redis::aio::ConnectionManager>) -> Self { + let known_latest_version = futures::executor::block_on(cache_operator.get_latest_version()) + .unwrap() + .unwrap(); + Self { + cache_operator: cache_operator.clone(), + in_memory_cache: InMemoryCache::new( + cache_operator, + known_latest_version, + NUM_SLOTS, + SIZE_LIMIT, + ), + known_latest_version: AtomicU64::new(known_latest_version), + } + } + + pub fn run( + &'a self, + mut handler_rx: Receiver<( + GetTransactionsRequest, + Sender<Result<TransactionsResponse, Status>>, + )>, + ) { + tokio_scoped::scope(|scope| { + scope.spawn(async move { + let _ = self + .in_memory_cache + .fetch_manager + .continuously_fetch_latest_data() + .await; + }); + scope.spawn(async move { + loop { + let result = self.cache_operator.clone().get_latest_version().await; + if let Ok(Some(known_latest_version)) = result { + self.set_known_latest_version(known_latest_version); + } else { + error!("Failed to fetch known latest version: {result:?}."); + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }); + while let Some((request, response_sender)) = handler_rx.blocking_recv() { + let known_latest_version = self.get_known_latest_version(); + let starting_version = request.starting_version.unwrap_or(known_latest_version); + + info!("Received request: {request:?}."); + if starting_version > known_latest_version + 10000 { + let err = Err(Status::failed_precondition( + "starting_version cannot be set to a far future version.", + )); + info!("Client error: {err:?}."); + let _ = response_sender.blocking_send(err); + continue; + } + + let max_batch_size = if let Some(batch_size) = request.batch_size { + batch_size as usize + } else { + DEFAULT_MAX_BATCH_SIZE + }; + + let ending_version = request + .transactions_count + .map(|count| starting_version + count); + + scope.spawn(async move { + self.start_streaming( + starting_version, + ending_version, + max_batch_size, + response_sender, + ) + .await + }); + } + }); + } + + async fn start_streaming( + &'a self, + starting_version: u64, + ending_version: Option<u64>, + max_batch_size: usize, + response_sender: tokio::sync::mpsc::Sender<Result<TransactionsResponse, Status>>, + ) { + info!("Start streaming, starting_version: {starting_version}, ending_version: {ending_version:?}."); + let mut next_version = starting_version; + let ending_version = ending_version.unwrap_or(u64::MAX); + loop { + if next_version >= ending_version { + break; + } + let known_latest_version = self.get_known_latest_version(); + if next_version > known_latest_version { + info!("next_version {next_version} is larger than known_latest_version {known_latest_version}"); + tokio::time::sleep(Duration::from_millis(100)).await; + continue; + } + + if let Some(transactions) = self + .in_memory_cache + .get_data(next_version, ending_version, max_batch_size) + .await + { + next_version += transactions.len() as u64; + let response = TransactionsResponse { + transactions, + // TODO(grao): Fix chain id. + chain_id: Some(0), + }; + if response_sender.send(Ok(response)).await.is_err() { + info!("Client dropped."); + break; + } + } else { + let err = Err(Status::not_found("Requested data is too old.")); + info!("Client error: {err:?}."); + let _ = response_sender.send(err).await; + break; + } + } + } + + fn get_known_latest_version(&self) -> u64 { + self.known_latest_version.load(Ordering::SeqCst) + } + + fn set_known_latest_version(&self, version: u64) { + self.known_latest_version.store(version, Ordering::SeqCst); + //info!("Updated known_latest_version to {version}."); + } +}
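The `DataManager` introduced above is effectively a fixed-size ring buffer: transaction `v` lives in slot `v % num_slots`, and a slot's contents are only meaningful while `start_version <= v < end_version`. A toy model of that invariant (with `u64` payloads standing in for `Transaction`s; the real code additionally tracks encoded byte sizes for eviction):

```rust
// Toy model of the DataManager ring buffer: transaction v is stored at slot
// v % num_slots, and a slot is only trusted while start..end covers v.
struct Ring {
    start: u64,              // oldest version still cached
    end: u64,                // next version to be written
    slots: Vec<Option<u64>>, // stand-in for Option<Box<Transaction>>
}

impl Ring {
    fn new(num_slots: usize) -> Self {
        Self { start: 0, end: 0, slots: vec![None; num_slots] }
    }

    fn push(&mut self, version: u64, payload: u64) {
        let n = self.slots.len() as u64;
        self.slots[(version % n) as usize] = Some(payload);
        self.end = self.end.max(version + 1);
        // If the write wrapped past the oldest entry, advance start so that
        // at most num_slots versions are considered live.
        if self.start + n < self.end {
            self.start = self.end - n;
        }
    }

    fn get(&self, version: u64) -> Option<u64> {
        if version < self.start || version >= self.end {
            return None; // evicted or not fetched yet
        }
        self.slots[(version % self.slots.len() as u64) as usize]
    }
}

fn main() {
    let mut ring = Ring::new(4);
    for v in 0..6 {
        ring.push(v, v * 10);
    }
    assert_eq!(ring.get(5), Some(50));
    assert_eq!(ring.get(1), None); // slot reused by version 5
}
```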
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs index 566941502a239..312a075344980 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 mod config; +mod data_service; mod metrics; mod service;
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs index 4813efda1aed9..7f1658608ae14 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs @@ -1,164 +1,2 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 - -use aptos_metrics_core::{ - register_gauge_vec, register_int_counter_vec, register_int_gauge_vec, GaugeVec, IntCounterVec, - IntGaugeVec, -}; -use once_cell::sync::Lazy; - -// The `identifier` label at the time of writing (2024-04-08) is always the -// application ID, a globally unique ID. - -/// Latest processed transaction version. -pub static LATEST_PROCESSED_VERSION_PER_PROCESSOR: Lazy<IntGaugeVec> = Lazy::new(|| { - register_int_gauge_vec!( - "indexer_grpc_data_service_with_user_latest_processed_version", - "Latest processed transaction version", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Number of transactions that are served by the data service. -pub static PROCESSED_VERSIONS_COUNT_PER_PROCESSOR: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_with_user_processed_versions", - "Number of transactions that have been processed by data service", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Number of errors that data service has encountered. -pub static ERROR_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_error", - "Number of errors that data service has encountered", - &["error_type"] - ) - .unwrap() -}); - -/// Data latency for the data service based on the latest processed transaction of the selected processor. -pub static PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR: Lazy<GaugeVec> = Lazy::new(|| { - register_gauge_vec!( - "indexer_grpc_data_service_with_user_latest_data_latency_in_secs", - "Latency of data service based on latest processed transaction", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Count of connections that data service has established. -pub static CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_connection_count_v2", - "Count of connections that data service has established", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Count of the short connections; i.e., < 10 seconds. -pub static SHORT_CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_short_connection_by_user_processor_count", - "Count of the short connections; i.e., < 10 seconds", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Count of bytes transferred to the client. This only represents the bytes prepared and -/// ready to send to the client. It does not represent the bytes actually -/// sent to the client. -/// -/// This is pre stripping, so it may include bytes for transactions that were later -/// stripped. See BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING for post -/// stripping. -pub static BYTES_READY_TO_TRANSFER_FROM_SERVER: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_bytes_ready_to_transfer_from_server", - "Count of bytes ready to transfer to the client (pre stripping)", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Count of bytes transferred to the client. This only represents the bytes prepared and -/// ready to send to the client. It does not represent the bytes actually -/// sent to the client. -/// -/// This is post stripping, meaning some transactions may have been stripped (removing -/// things such as events, writesets, payload, signature). Compare this with -/// BYTES_READY_TO_TRANSFER_FROM_SERVER to see how many bytes were stripped. -pub static BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING: Lazy<IntCounterVec> = - Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_bytes_ready_to_transfer_from_server_after_stripping", - "Count of bytes ready to transfer to the client (post stripping)", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() - }); - -/// The number of transactions that had data (such as events, writesets, payload, -/// signature) stripped from them due to the `txns_to_strip_filter`. See -/// `strip_transactions` for more. -pub static NUM_TRANSACTIONS_STRIPPED: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_num_transactions_stripped", - "Number of transactions that had data (such as events, writesets, payload, signature) stripped from them", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -});
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/config.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/config.rs new file mode 100644 index 0000000000000..c5f621fcde703 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/config.rs @@ -0,0 +1,244 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::service::RawDataServerWrapper; +use anyhow::{bail, Result}; +use aptos_indexer_grpc_server_framework::RunnableConfig; +use aptos_indexer_grpc_utils::{ + compression_util::StorageFormat, config::IndexerGrpcFileStoreConfig, + in_memory_cache::InMemoryCacheConfig, types::RedisUrl, +}; +use aptos_protos::{ + indexer::v1::FILE_DESCRIPTOR_SET as INDEXER_V1_FILE_DESCRIPTOR_SET, + transaction::v1::FILE_DESCRIPTOR_SET as TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET, + util::timestamp::FILE_DESCRIPTOR_SET as UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET, +}; +use aptos_transaction_filter::BooleanTransactionFilter; +use serde::{Deserialize, Serialize}; +use std::{net::SocketAddr, sync::Arc}; +use tonic::{codec::CompressionEncoding, transport::Server}; + +pub const SERVER_NAME: &str = "idxdatasvc"; + +// Default max response channel size. +const DEFAULT_MAX_RESPONSE_CHANNEL_SIZE: usize = 3; + +// HTTP2 ping interval and timeout. +// This can help the server to garbage collect dead connections. +// tonic server: https://docs.rs/tonic/latest/tonic/transport/server/struct.Server.html#method.http2_keepalive_interval +const HTTP2_PING_INTERVAL_DURATION: std::time::Duration = std::time::Duration::from_secs(60); +const HTTP2_PING_TIMEOUT_DURATION: std::time::Duration = std::time::Duration::from_secs(10); + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct TlsConfig { + /// The address for the TLS GRPC server to listen on. + pub data_service_grpc_listen_address: SocketAddr, + pub cert_path: String, + pub key_path: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct NonTlsConfig { + /// The address for the non-TLS GRPC server to listen on. + pub data_service_grpc_listen_address: SocketAddr, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct IndexerGrpcDataServiceConfig { + /// If given, we will run a server that uses TLS. + pub data_service_grpc_tls_config: Option<TlsConfig>, + /// If given, we will run a server that does not use TLS. + pub data_service_grpc_non_tls_config: Option<NonTlsConfig>, + /// The size of the response channel that responses can be buffered in. + #[serde(default = "IndexerGrpcDataServiceConfig::default_data_service_response_channel_size")] + pub data_service_response_channel_size: usize, + /// Deprecated: a list of auth tokens that are allowed to access the service. + #[serde(default)] + pub whitelisted_auth_tokens: Vec<String>, + /// Deprecated: if set, don't check for auth tokens. + #[serde(default)] + pub disable_auth_check: bool, + /// File store config. + pub file_store_config: IndexerGrpcFileStoreConfig, + /// Redis read replica address. + pub redis_read_replica_address: RedisUrl, + /// Support compressed cache data. + #[serde(default = "IndexerGrpcDataServiceConfig::default_enable_cache_compression")] + pub enable_cache_compression: bool, + #[serde(default)] + pub in_memory_cache_config: InMemoryCacheConfig, + /// Any transaction that matches this filter will be stripped. This means we remove + /// the payload, signature, events, and writesets from it before sending it + /// downstream. This should only be used in an emergency situation, e.g. when txns + /// related to a certain module are too large and are causing issues for the data + /// service. Learn more here: + /// + /// https://www.notion.so/aptoslabs/Runbook-c006a37259394ac2ba904d6b54d180fa?pvs=4#171c210964ec42a89574fc80154f9e85 + /// + /// Generally you will want to start with this with an OR, and then list out + /// separate filters that describe each type of txn we want to strip. + #[serde(default = "IndexerGrpcDataServiceConfig::default_txns_to_strip_filter")] + pub txns_to_strip_filter: BooleanTransactionFilter, +} + +impl IndexerGrpcDataServiceConfig { + pub fn new( + data_service_grpc_tls_config: Option<TlsConfig>, + data_service_grpc_non_tls_config: Option<NonTlsConfig>, + data_service_response_channel_size: Option<usize>, + disable_auth_check: bool, + file_store_config: IndexerGrpcFileStoreConfig, + redis_read_replica_address: RedisUrl, + enable_cache_compression: bool, + in_memory_cache_config: InMemoryCacheConfig, + txns_to_strip_filter: BooleanTransactionFilter, + ) -> Self { + Self { + data_service_grpc_tls_config, + data_service_grpc_non_tls_config, + data_service_response_channel_size: data_service_response_channel_size + .unwrap_or_else(Self::default_data_service_response_channel_size), + whitelisted_auth_tokens: vec![], + disable_auth_check, + file_store_config, + redis_read_replica_address, + enable_cache_compression, + in_memory_cache_config, + txns_to_strip_filter, + } + } + + pub const fn default_data_service_response_channel_size() -> usize { + DEFAULT_MAX_RESPONSE_CHANNEL_SIZE + } + + pub const fn default_enable_cache_compression() -> bool { + false + } + + pub fn default_txns_to_strip_filter() -> BooleanTransactionFilter { + // This filter matches no txns. + BooleanTransactionFilter::new_or(vec![]) + } +} + +#[async_trait::async_trait] +impl RunnableConfig for IndexerGrpcDataServiceConfig { + fn validate(&self) -> Result<()> { + if self.data_service_grpc_non_tls_config.is_none() + && self.data_service_grpc_tls_config.is_none() + { + bail!("At least one of data_service_grpc_non_tls_config and data_service_grpc_tls_config must be set"); + } + self.in_memory_cache_config.validate()?; + Ok(()) + } + + async fn run(&self) -> Result<()> { + let reflection_service = tonic_reflection::server::Builder::configure() + // Note: It is critical that the file descriptor set is registered for every + // file that the top level API proto depends on recursively. If you don't, + // compilation will still succeed but reflection will fail at runtime.
+ // + // TODO: Add a test for this / something in build.rs, this is a big footgun. + .register_encoded_file_descriptor_set(INDEXER_V1_FILE_DESCRIPTOR_SET) + .register_encoded_file_descriptor_set(TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET) + .register_encoded_file_descriptor_set(UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET) + .build() + .map_err(|e| anyhow::anyhow!("Failed to build reflection service: {}", e))? + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip); + + let cache_storage_format: StorageFormat = if self.enable_cache_compression { + StorageFormat::Lz4CompressedProto + } else { + StorageFormat::Base64UncompressedProto + }; + + println!( + ">>>> Starting Redis connection: {:?}", + &self.redis_read_replica_address.0 + ); + let redis_conn = redis::Client::open(self.redis_read_replica_address.0.clone())? + .get_tokio_connection_manager() + .await?; + println!(">>>> Redis connection established"); + // InMemoryCache. + let in_memory_cache = + aptos_indexer_grpc_utils::in_memory_cache::InMemoryCache::new_with_redis_connection( + self.in_memory_cache_config.clone(), + redis_conn, + cache_storage_format, + ) + .await?; + println!(">>>> InMemoryCache established"); + // Add authentication interceptor. + let server = RawDataServerWrapper::new( + self.redis_read_replica_address.clone(), + self.file_store_config.clone(), + self.data_service_response_channel_size, + self.txns_to_strip_filter.clone(), + cache_storage_format, + Arc::new(in_memory_cache), + )?; + let svc = aptos_protos::indexer::v1::raw_data_server::RawDataServer::new(server) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip); + println!(">>>> Starting gRPC server: {:?}", &svc); + + let svc_clone = svc.clone(); + let reflection_service_clone = reflection_service.clone(); + + let mut tasks = vec![]; + if let Some(config) = &self.data_service_grpc_non_tls_config { + let listen_address = config.data_service_grpc_listen_address; + tracing::info!( + grpc_address = listen_address.to_string().as_str(), + "[data service] starting gRPC server with non-TLS." + ); + tasks.push(tokio::spawn(async move { + Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .add_service(svc_clone) + .add_service(reflection_service_clone) + .serve(listen_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + if let Some(config) = &self.data_service_grpc_tls_config { + let listen_address = config.data_service_grpc_listen_address; + let cert = tokio::fs::read(config.cert_path.clone()).await?; + let key = tokio::fs::read(config.key_path.clone()).await?; + let identity = tonic::transport::Identity::from_pem(cert, key); + tracing::info!( + grpc_address = listen_address.to_string().as_str(), + "[Data Service] Starting gRPC server with TLS." + ); + tasks.push(tokio::spawn(async move { + Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .tls_config(tonic::transport::ServerTlsConfig::new().identity(identity))? 
+ .add_service(svc) + .add_service(reflection_service) + .serve(listen_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + + futures::future::try_join_all(tasks).await?; + Ok(()) + } + + fn get_server_name(&self) -> String { + SERVER_NAME.to_string() + } +}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/lib.rs new file mode 100644 index 0000000000000..566941502a239 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/lib.rs @@ -0,0 +1,8 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +mod config; +mod metrics; +mod service; + +pub use config::{IndexerGrpcDataServiceConfig, NonTlsConfig, SERVER_NAME};
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/main.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/main.rs new file mode 100644 index 0000000000000..265054ba3cddd --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/main.rs @@ -0,0 +1,17 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::Result; +use aptos_indexer_grpc_data_service::IndexerGrpcDataServiceConfig; +use aptos_indexer_grpc_server_framework::ServerArgs; +use clap::Parser; + +#[cfg(unix)] +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + +#[tokio::main] +async fn main() -> Result<()> { + let args = ServerArgs::parse(); + args.run::<IndexerGrpcDataServiceConfig>().await +}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/metrics.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/metrics.rs new file mode 100644 index 0000000000000..4813efda1aed9 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/metrics.rs @@ -0,0 +1,164 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_metrics_core::{ + register_gauge_vec, register_int_counter_vec, register_int_gauge_vec, GaugeVec, IntCounterVec, + IntGaugeVec, +}; +use once_cell::sync::Lazy; + +// The `identifier` label at the time of writing (2024-04-08) is always the +// application ID, a globally unique ID. + +/// Latest processed transaction version. +pub static LATEST_PROCESSED_VERSION_PER_PROCESSOR: Lazy<IntGaugeVec> = Lazy::new(|| { + register_int_gauge_vec!( + "indexer_grpc_data_service_with_user_latest_processed_version", + "Latest processed transaction version", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +}); + +/// Number of transactions that are served by the data service. +pub static PROCESSED_VERSIONS_COUNT_PER_PROCESSOR: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_with_user_processed_versions", + "Number of transactions that have been processed by data service", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +}); + +/// Number of errors that data service has encountered. +pub static ERROR_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_error", + "Number of errors that data service has encountered", + &["error_type"] + ) + .unwrap() +}); + +/// Data latency for the data service based on the latest processed transaction of the selected processor. +pub static PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR: Lazy<GaugeVec> = Lazy::new(|| { + register_gauge_vec!( + "indexer_grpc_data_service_with_user_latest_data_latency_in_secs", + "Latency of data service based on latest processed transaction", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +}); + +/// Count of connections that data service has established. +pub static CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_connection_count_v2", + "Count of connections that data service has established", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +}); + +/// Count of the short connections; i.e., < 10 seconds. +pub static SHORT_CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_short_connection_by_user_processor_count", + "Count of the short connections; i.e., < 10 seconds", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +}); + +/// Count of bytes transferred to the client. This only represents the bytes prepared and +/// ready to send to the client. It does not represent the bytes actually +/// sent to the client. +/// +/// This is pre stripping, so it may include bytes for transactions that were later +/// stripped. See BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING for post +/// stripping. +pub static BYTES_READY_TO_TRANSFER_FROM_SERVER: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_bytes_ready_to_transfer_from_server", + "Count of bytes ready to transfer to the client (pre stripping)", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +}); + +/// Count of bytes transferred to the client. This only represents the bytes prepared and +/// ready to send to the client. It does not represent the bytes actually +/// sent to the client. +/// +/// This is post stripping, meaning some transactions may have been stripped (removing +/// things such as events, writesets, payload, signature). Compare this with +/// BYTES_READY_TO_TRANSFER_FROM_SERVER to see how many bytes were stripped. +pub static BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING: Lazy<IntCounterVec> = + Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_bytes_ready_to_transfer_from_server_after_stripping", + "Count of bytes ready to transfer to the client (post stripping)", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() + }); + +/// The number of transactions that had data (such as events, writesets, payload, +/// signature) stripped from them due to the `txns_to_strip_filter`. See +/// `strip_transactions` for more. +pub static NUM_TRANSACTIONS_STRIPPED: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_num_transactions_stripped", + "Number of transactions that had data (such as events, writesets, payload, signature) stripped from them", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +});
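These metric statics follow the usual once_cell plus prometheus registration pattern: the closure runs (and registers the collector) on first access, and every distinct combination of label values becomes its own time series. A minimal sketch using the prometheus crate directly, on the assumption that `aptos_metrics_core` is a thin re-export of these macros (as the import list above suggests):

```rust
use once_cell::sync::Lazy;
use prometheus::{register_int_counter_vec, IntCounterVec};

// Registered against the default registry the first time it is dereferenced.
static ERROR_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "indexer_grpc_data_service_error",
        "Number of errors that data service has encountered",
        &["error_type"]
    )
    .unwrap()
});

fn main() {
    // Each distinct label value gets its own series; later accesses reuse
    // the same collector.
    ERROR_COUNT.with_label_values(&["redis_connection_failed"]).inc();
    ERROR_COUNT.with_label_values(&["data_fetch_failed"]).inc_by(3);
}
```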
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/service.rs new file mode 100644 index 0000000000000..f2faf42408631 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/service.rs @@ -0,0 +1,1319 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::metrics::{ + BYTES_READY_TO_TRANSFER_FROM_SERVER, BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING, + CONNECTION_COUNT, ERROR_COUNT, LATEST_PROCESSED_VERSION_PER_PROCESSOR, + NUM_TRANSACTIONS_STRIPPED, PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR, + PROCESSED_VERSIONS_COUNT_PER_PROCESSOR, SHORT_CONNECTION_COUNT, +}; +use anyhow::{Context, Result}; +use aptos_indexer_grpc_utils::{ + cache_operator::{CacheBatchGetStatus, CacheCoverageStatus, CacheOperator}, + chunk_transactions, + compression_util::{CacheEntry, StorageFormat}, + config::IndexerGrpcFileStoreConfig, + constants::{ + IndexerGrpcRequestMetadata, GRPC_AUTH_TOKEN_HEADER, GRPC_REQUEST_NAME_HEADER, + MESSAGE_SIZE_LIMIT, REQUEST_HEADER_APTOS_APPLICATION_NAME, REQUEST_HEADER_APTOS_EMAIL, + REQUEST_HEADER_APTOS_IDENTIFIER, REQUEST_HEADER_APTOS_IDENTIFIER_TYPE, + }, + counters::{log_grpc_step, IndexerGrpcStep, NUM_MULTI_FETCH_OVERLAPPED_VERSIONS}, + file_store_operator::FileStoreOperator, + in_memory_cache::InMemoryCache, + time_diff_since_pb_timestamp_in_secs, + types::RedisUrl, +}; +use aptos_moving_average::MovingAverage; +use aptos_protos::{ + indexer::v1::{raw_data_server::RawData, GetTransactionsRequest, TransactionsResponse}, + transaction::v1::{transaction::TxnData, Transaction}, +}; +use aptos_transaction_filter::{BooleanTransactionFilter, Filterable}; +use futures::Stream; +use prost::Message; +use redis::Client; +use std::{ + collections::HashMap, + pin::Pin, + str::FromStr, + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::sync::mpsc::{channel, error::SendTimeoutError}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{Request, Response, Status}; +use tracing::{error, info, warn}; +use uuid::Uuid; + +type ResponseStream = Pin<Box<dyn Stream<Item = Result<TransactionsResponse, Status>> + Send>>; + +const MOVING_AVERAGE_WINDOW_SIZE: u64 = 10_000; +// When trying to fetch beyond the current head of cache, the server will retry after this duration. +const AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS: u64 = 50; +// When an error happens while fetching data from cache and file store, the server will retry after this duration. +// TODO(larry): fix all errors treated as transient errors. +const TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS: u64 = 1000; +// This is the time we wait for the file store to be ready. It should only be +// kicked off when there's no metadata in the file store. +const FILE_STORE_METADATA_WAIT_MS: u64 = 2000; + +// The server will retry sending the response to the client and give up after RESPONSE_CHANNEL_SEND_TIMEOUT. +// This is to prevent the server from being occupied by a slow client. +const RESPONSE_CHANNEL_SEND_TIMEOUT: Duration = Duration::from_secs(120); + +const SHORT_CONNECTION_DURATION_IN_SECS: u64 = 10; + +const RESPONSE_HEADER_APTOS_CONNECTION_ID_HEADER: &str = "x-aptos-connection-id"; +const SERVICE_TYPE: &str = "data_service"; + +// Number of times to retry fetching a given txn block from the stores +pub const NUM_DATA_FETCH_RETRIES: u8 = 5; + +// Max number of tasks to reach out to TXN stores with +const MAX_FETCH_TASKS_PER_REQUEST: u64 = 5; +// The number of transactions we store per txn block; this is used to determine max num of tasks +const TRANSACTIONS_PER_STORAGE_BLOCK: u64 = 1000; + +pub struct RawDataServerWrapper { + pub redis_client: Arc<Client>, + pub file_store_config: IndexerGrpcFileStoreConfig, + pub data_service_response_channel_size: usize, + pub txns_to_strip_filter: BooleanTransactionFilter, + pub cache_storage_format: StorageFormat, + in_memory_cache: Arc<InMemoryCache>, +} + +// Exclude in_memory_cache +impl std::fmt::Debug for RawDataServerWrapper { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RawDataServerWrapper") + .field("redis_client", &"Arc<Client>") + .field("file_store_config", &self.file_store_config) + .field( + "data_service_response_channel_size", + &self.data_service_response_channel_size, + ) + .field("txns_to_strip_filter", &self.txns_to_strip_filter) + .field("cache_storage_format", &self.cache_storage_format) + .finish() + } +} + +impl RawDataServerWrapper { + pub fn new( + redis_address: RedisUrl, + file_store_config: IndexerGrpcFileStoreConfig, + data_service_response_channel_size: usize, + txns_to_strip_filter: BooleanTransactionFilter, + cache_storage_format: StorageFormat, + in_memory_cache: Arc<InMemoryCache>, + ) -> anyhow::Result<Self> { + Ok(Self { + redis_client: Arc::new( + redis::Client::open(redis_address.0.clone()).with_context(|| { + format!("Failed to create redis client for {}", redis_address) + })?, + ), + file_store_config, + data_service_response_channel_size, + txns_to_strip_filter, + cache_storage_format, + in_memory_cache, + }) + } +} + +/// Enum to represent the status of the data fetching overall. +enum TransactionsDataStatus { + // Data fetching is successful. + Success(Vec<Transaction>), + // Ahead of current head of cache. + AheadOfCache, +}
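The `ResponseStream` alias above is the standard way to hand tonic a server-streaming response backed by an mpsc channel: wrap the receiver in `ReceiverStream`, pin-box it, and coerce it to the trait object. A trimmed-down sketch with `u64` standing in for `TransactionsResponse` (the `make_stream` helper and its signature are illustrative, not part of this service):

```rust
use std::pin::Pin;

use futures::Stream;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{Response, Status};

type ResponseStream<T> = Pin<Box<dyn Stream<Item = Result<T, Status>> + Send>>;

fn make_stream(
    buffer: usize,
) -> (
    mpsc::Sender<Result<u64, Status>>,
    Response<ResponseStream<u64>>,
) {
    let (tx, rx) = mpsc::channel(buffer);
    // ReceiverStream adapts the channel into a Stream: the client sees items
    // as they are sent, and the stream ends when `tx` is dropped.
    let stream = ReceiverStream::new(rx);
    (tx, Response::new(Box::pin(stream) as ResponseStream<u64>))
}
```

The producer side (here, `data_fetcher_task`) keeps `tx` and pushes results as they become available, which is what bounds memory via the `data_service_response_channel_size` buffer.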
+ let request_metadata = match get_request_metadata(&req) { + Ok(request_metadata) => request_metadata, + _ => return Result::Err(Status::aborted("Invalid request token")), + }; + CONNECTION_COUNT + .with_label_values(&request_metadata.get_label_values()) + .inc(); + let request = req.into_inner(); + + let transactions_count = request.transactions_count; + + // Response channel to stream the data to the client. + let (tx, rx) = channel(self.data_service_response_channel_size); + let current_version = match &request.starting_version { + Some(version) => *version, + // Live mode if starting version isn't specified + None => self + .in_memory_cache + .latest_version() + .await + .saturating_sub(1), + }; + + let file_store_operator: Box = self.file_store_config.create(); + let file_store_operator = Arc::new(file_store_operator); + + // Adds tracing context for the request. + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceNewRequestReceived, + Some(current_version as i64), + transactions_count.map(|v| (v as i64 + current_version as i64 - 1)), + None, + None, + None, + None, + None, + Some(&request_metadata), + ); + + let redis_client = self.redis_client.clone(); + let cache_storage_format = self.cache_storage_format; + let request_metadata = Arc::new(request_metadata); + let txns_to_strip_filter = self.txns_to_strip_filter.clone(); + let in_memory_cache = self.in_memory_cache.clone(); + tokio::spawn({ + let request_metadata = request_metadata.clone(); + async move { + data_fetcher_task( + redis_client, + file_store_operator, + cache_storage_format, + request_metadata, + transactions_count, + tx, + txns_to_strip_filter, + current_version, + in_memory_cache, + ) + .await; + } + }); + + let output_stream = ReceiverStream::new(rx); + let mut response = Response::new(Box::pin(output_stream) as Self::GetTransactionsStream); + + response.metadata_mut().insert( + RESPONSE_HEADER_APTOS_CONNECTION_ID_HEADER, + tonic::metadata::MetadataValue::from_str(&request_metadata.request_connection_id) + .unwrap(), + ); + Ok(response) + } +} + +enum DataFetchSubTaskResult { + BatchSuccess(Vec>), + Success(Vec), + NoResults, +} + +async fn get_data_with_tasks( + start_version: u64, + transactions_count: Option, + chain_id: u64, + cache_operator: &mut CacheOperator, + file_store_operator: Arc>, + request_metadata: Arc, + cache_storage_format: StorageFormat, + in_memory_cache: Arc, +) -> DataFetchSubTaskResult { + let start_time = Instant::now(); + let in_memory_transactions = in_memory_cache.get_transactions(start_version).await; + if !in_memory_transactions.is_empty() { + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceFetchingDataFromInMemoryCache, + Some(start_version as i64), + Some(in_memory_transactions.last().as_ref().unwrap().version as i64), + None, + None, + Some(start_time.elapsed().as_secs_f64()), + None, + Some(in_memory_transactions.len() as i64), + Some(&request_metadata), + ); + return DataFetchSubTaskResult::BatchSuccess(chunk_transactions( + in_memory_transactions, + MESSAGE_SIZE_LIMIT, + )); + } + let cache_coverage_status = cache_operator + .check_cache_coverage_status(start_version) + .await; + + let num_tasks_to_use = match cache_coverage_status { + Ok(CacheCoverageStatus::DataNotReady) => return DataFetchSubTaskResult::NoResults, + Ok(CacheCoverageStatus::CacheHit(_)) => 1, + Ok(CacheCoverageStatus::CacheEvicted) => match transactions_count { + None => MAX_FETCH_TASKS_PER_REQUEST, + Some(transactions_count) => { + let num_tasks = transactions_count / 
TRANSACTIONS_PER_STORAGE_BLOCK; + if num_tasks >= MAX_FETCH_TASKS_PER_REQUEST { + // Limit the max tasks to MAX_FETCH_TASKS_PER_REQUEST + MAX_FETCH_TASKS_PER_REQUEST + } else if num_tasks < 1 { + // Limit the min tasks to 1 + 1 + } else { + num_tasks + } + }, + }, + Err(_) => { + error!("[Data Service] Failed to get cache coverage status."); + panic!("Failed to get cache coverage status."); + }, + }; + + let mut tasks = tokio::task::JoinSet::new(); + let mut current_version = start_version; + + for _ in 0..num_tasks_to_use { + tasks.spawn({ + // TODO: arc this instead of cloning + let mut cache_operator = cache_operator.clone(); + let file_store_operator = file_store_operator.clone(); + let request_metadata = request_metadata.clone(); + async move { + get_data_in_task( + current_version, + chain_id, + &mut cache_operator, + file_store_operator, + request_metadata.clone(), + cache_storage_format, + ) + .await + } + }); + // Storage is in block of 1000: we align our current version fetch to the nearest block + current_version += TRANSACTIONS_PER_STORAGE_BLOCK; + current_version -= current_version % TRANSACTIONS_PER_STORAGE_BLOCK; + } + + let mut transactions: Vec> = vec![]; + while let Some(result) = tasks.join_next().await { + match result { + Ok(DataFetchSubTaskResult::Success(txns)) => { + transactions.push(txns); + }, + Ok(DataFetchSubTaskResult::NoResults) => {}, + Err(e) => { + error!( + error = e.to_string(), + "[Data Service] Failed to get data from cache and file store." + ); + panic!("Failed to get data from cache and file store."); + }, + Ok(_) => unreachable!("Fetching from a single task will never return a batch"), + } + } + + if transactions.is_empty() { + DataFetchSubTaskResult::NoResults + } else { + DataFetchSubTaskResult::BatchSuccess(transactions) + } +} + +async fn get_data_in_task( + start_version: u64, + chain_id: u64, + cache_operator: &mut CacheOperator, + file_store_operator: Arc>, + request_metadata: Arc, + cache_storage_format: StorageFormat, +) -> DataFetchSubTaskResult { + let current_batch_start_time = std::time::Instant::now(); + + let fetched = data_fetch( + start_version, + cache_operator, + file_store_operator, + request_metadata.clone(), + cache_storage_format, + ); + + let transaction_data = match fetched.await { + Ok(TransactionsDataStatus::Success(transactions)) => transactions, + Ok(TransactionsDataStatus::AheadOfCache) => { + info!( + start_version = start_version, + request_identifier = request_metadata.request_identifier.as_str(), + processor_name = request_metadata.processor_name.as_str(), + connection_id = request_metadata.request_connection_id.as_str(), + duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(), + service_type = SERVICE_TYPE, + "[Data Service] Requested data is ahead of cache. Sleeping for {} ms.", + AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS, + ); + ahead_of_cache_data_handling().await; + // Retry after a short sleep. + return DataFetchSubTaskResult::NoResults; + }, + Err(e) => { + ERROR_COUNT.with_label_values(&["data_fetch_failed"]).inc(); + data_fetch_error_handling(e, start_version, chain_id).await; + // Retry after a short sleep. 
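The fan-out above staggers tasks on storage-block boundaries: the first task starts at the requested version, and each later task starts at the next multiple of TRANSACTIONS_PER_STORAGE_BLOCK. A small sketch of that arithmetic (function name assumed, not from the patch):

const TRANSACTIONS_PER_STORAGE_BLOCK: u64 = 1000;

// Mirrors the two-statement update above: bump by one block, then snap
// back down to the block boundary.
fn next_block_start(version: u64) -> u64 {
    let bumped = version + TRANSACTIONS_PER_STORAGE_BLOCK;
    bumped - bumped % TRANSACTIONS_PER_STORAGE_BLOCK
}

fn main() {
    assert_eq!(next_block_start(1500), 2000); // mid-block start snaps forward
    assert_eq!(next_block_start(2000), 3000); // aligned start moves one full block
    // So a request starting at 1500 spawns tasks at 1500, 2000, 3000, ...
}

As a side note, the min/max branching over num_tasks earlier in this function is equivalent to num_tasks.clamp(1, MAX_FETCH_TASKS_PER_REQUEST).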
+            return DataFetchSubTaskResult::NoResults;
+        },
+    };
+    DataFetchSubTaskResult::Success(transaction_data)
+}
+
+// This is a task spawned off for servicing a user's request
+async fn data_fetcher_task(
+    redis_client: Arc<redis::Client>,
+    file_store_operator: Arc<Box<dyn FileStoreOperator>>,
+    cache_storage_format: StorageFormat,
+    request_metadata: Arc<IndexerGrpcRequestMetadata>,
+    transactions_count: Option<u64>,
+    tx: tokio::sync::mpsc::Sender<Result<TransactionsResponse, Status>>,
+    txns_to_strip_filter: BooleanTransactionFilter,
+    mut current_version: u64,
+    in_memory_cache: Arc<InMemoryCache>,
+) {
+    let mut connection_start_time = Some(std::time::Instant::now());
+    let mut transactions_count = transactions_count;
+
+    // Establish redis connection
+    let conn = match redis_client.get_tokio_connection_manager().await {
+        Ok(conn) => conn,
+        Err(e) => {
+            ERROR_COUNT
+                .with_label_values(&["redis_connection_failed"])
+                .inc();
+            // Connection will be dropped anyway, so we ignore the error here.
+            let _result = tx
+                .send_timeout(
+                    Err(Status::unavailable(
+                        "[Data Service] Cannot connect to Redis; please retry.",
+                    )),
+                    RESPONSE_CHANNEL_SEND_TIMEOUT,
+                )
+                .await;
+            error!(
+                error = e.to_string(),
+                "[Data Service] Failed to get redis connection."
+            );
+            return;
+        },
+    };
+    let mut cache_operator = CacheOperator::new(conn, cache_storage_format);
+
+    // Validate chain id
+    let mut metadata = file_store_operator.get_file_store_metadata().await;
+    while metadata.is_none() {
+        metadata = file_store_operator.get_file_store_metadata().await;
+        tracing::warn!(
+            "[Data Service] File store metadata not found. Waiting for {} ms.",
+            FILE_STORE_METADATA_WAIT_MS
+        );
+        tokio::time::sleep(std::time::Duration::from_millis(
+            FILE_STORE_METADATA_WAIT_MS,
+        ))
+        .await;
+    }
+
+    let metadata_chain_id = metadata.unwrap().chain_id;
+
+    // Validate redis chain id. Must be present by the time it gets here
+    let chain_id = match cache_operator.get_chain_id().await {
+        Ok(chain_id) => chain_id.unwrap(),
+        Err(e) => {
+            ERROR_COUNT
+                .with_label_values(&["redis_get_chain_id_failed"])
+                .inc();
+            // Connection will be dropped anyway, so we ignore the error here.
+            let _result = tx
+                .send_timeout(
+                    Err(Status::unavailable(
+                        "[Data Service] Cannot get the chain id from redis; please retry.",
+                    )),
+                    RESPONSE_CHANNEL_SEND_TIMEOUT,
+                )
+                .await;
+            error!(
+                error = e.to_string(),
+                "[Data Service] Failed to get chain id from redis."
+            );
+            return;
+        },
+    };
+
+    if metadata_chain_id != chain_id {
+        let _result = tx
+            .send_timeout(
+                Err(Status::unavailable("[Data Service] Chain ID mismatch.")),
+                RESPONSE_CHANNEL_SEND_TIMEOUT,
+            )
+            .await;
+        error!("[Data Service] Chain ID mismatch.");
+        return;
+    }
+
+    // Data service metrics.
+    let mut tps_calculator = MovingAverage::new(MOVING_AVERAGE_WINDOW_SIZE);
+
+    loop {
+        // 1. Fetch data from cache and file store.
+        let transaction_data = match get_data_with_tasks(
+            current_version,
+            transactions_count,
+            chain_id,
+            &mut cache_operator,
+            file_store_operator.clone(),
+            request_metadata.clone(),
+            cache_storage_format,
+            in_memory_cache.clone(),
+        )
+        .await
+        {
+            DataFetchSubTaskResult::BatchSuccess(txns) => txns,
+            DataFetchSubTaskResult::Success(_) => {
+                unreachable!("Fetching from multiple tasks will never return a single vector")
+            },
+            DataFetchSubTaskResult::NoResults => continue,
+        };
+
+        let mut transaction_data = ensure_sequential_transactions(transaction_data);
+
+        // TODO: Unify the truncation logic for start and end.
+        if let Some(count) = transactions_count {
+            if count == 0 {
+                // End the data stream.
+                // Since the client receives all the data it requested, we don't count it as a short connection.
+                connection_start_time = None;
+                break;
+            } else if (count as usize) < transaction_data.len() {
+                // Trim the data to the requested end version.
+                transaction_data.truncate(count as usize);
+                transactions_count = Some(0);
+            } else {
+                transactions_count = Some(count - transaction_data.len() as u64);
+            }
+        };
+        // Note: this is the protobuf encoded transaction size.
+        let bytes_ready_to_transfer = transaction_data
+            .iter()
+            .map(|t| t.encoded_len())
+            .sum::<usize>();
+        BYTES_READY_TO_TRANSFER_FROM_SERVER
+            .with_label_values(&request_metadata.get_label_values())
+            .inc_by(bytes_ready_to_transfer as u64);
+        // 2. Push the data to the response channel, i.e. stream the data to the client.
+        let current_batch_size = transaction_data.as_slice().len();
+        let end_of_batch_version = transaction_data.as_slice().last().unwrap().version;
+        let (resp_items, num_stripped) = get_transactions_responses_builder(
+            transaction_data,
+            chain_id as u32,
+            &txns_to_strip_filter,
+        );
+        NUM_TRANSACTIONS_STRIPPED
+            .with_label_values(&request_metadata.get_label_values())
+            .inc_by(num_stripped as u64);
+        let bytes_ready_to_transfer_after_stripping = resp_items
+            .iter()
+            .flat_map(|response| &response.transactions)
+            .map(|t| t.encoded_len())
+            .sum::<usize>();
+        BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING
+            .with_label_values(&request_metadata.get_label_values())
+            .inc_by(bytes_ready_to_transfer_after_stripping as u64);
+        let data_latency_in_secs = resp_items
+            .last()
+            .unwrap()
+            .transactions
+            .last()
+            .unwrap()
+            .timestamp
+            .as_ref()
+            .map(time_diff_since_pb_timestamp_in_secs);
+
+        match channel_send_multiple_with_timeout(resp_items, tx.clone(), request_metadata.clone())
+            .await
+        {
+            Ok(_) => {
+                // TODO: Reassess whether this metric is useful.
+                LATEST_PROCESSED_VERSION_PER_PROCESSOR
+                    .with_label_values(&request_metadata.get_label_values())
+                    .set(end_of_batch_version as i64);
+                PROCESSED_VERSIONS_COUNT_PER_PROCESSOR
+                    .with_label_values(&request_metadata.get_label_values())
+                    .inc_by(current_batch_size as u64);
+                if let Some(data_latency_in_secs) = data_latency_in_secs {
+                    PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR
+                        .with_label_values(&request_metadata.get_label_values())
+                        .set(data_latency_in_secs);
+                }
+            },
+            Err(SendTimeoutError::Timeout(_)) => {
+                warn!("[Data Service] Receiver is full; exiting.");
+                break;
+            },
+            Err(SendTimeoutError::Closed(_)) => {
+                warn!("[Data Service] Receiver is closed; exiting.");
+                break;
+            },
+        }
+        // 3. Update the current version and record current tps.
+        tps_calculator.tick_now(current_batch_size as u64);
+        current_version = end_of_batch_version + 1;
+    }
+    info!(
+        request_identifier = request_metadata.request_identifier.as_str(),
+        processor_name = request_metadata.processor_name.as_str(),
+        connection_id = request_metadata.request_connection_id.as_str(),
+        service_type = SERVICE_TYPE,
+        "[Data Service] Client disconnected."
+    );
+    if let Some(start_time) = connection_start_time {
+        if start_time.elapsed().as_secs() < SHORT_CONNECTION_DURATION_IN_SECS {
+            SHORT_CONNECTION_COUNT
+                .with_label_values(&request_metadata.get_label_values())
+                .inc();
+        }
+    }
+}
+
+/// Takes in multiple batches of transactions, and:
+/// 1. De-dupes in the case of overlap (but logs to a Prometheus metric)
+/// 2. Panics in cases of gaps
+fn ensure_sequential_transactions(mut batches: Vec<Vec<Transaction>>) -> Vec<Transaction> {
+    // If there's only one, no sorting required
+    if batches.len() == 1 {
+        return batches.pop().unwrap();
+    }
+
+    // Sort by the first version per batch, ascending
+    batches.sort_by(|a, b| a.first().unwrap().version.cmp(&b.first().unwrap().version));
+    let first_version = batches.first().unwrap().first().unwrap().version;
+    let last_version = batches.last().unwrap().last().unwrap().version;
+    let mut transactions: Vec<Transaction> = vec![];
+
+    let mut prev_start = None;
+    let mut prev_end = None;
+    for mut batch in batches {
+        let mut start_version = batch.first().unwrap().version;
+        let end_version = batch.last().unwrap().version;
+        if prev_start.is_some() {
+            let prev_start = prev_start.unwrap();
+            let prev_end = prev_end.unwrap();
+            // If this batch is fully contained within the previous batch, skip it
+            if prev_start <= start_version && prev_end >= end_version {
+                NUM_MULTI_FETCH_OVERLAPPED_VERSIONS
+                    .with_label_values(&[SERVICE_TYPE, "full"])
+                    .inc_by(end_version - start_version);
+                continue;
+            }
+            // If this batch overlaps with the previous batch, combine them
+            if prev_end >= start_version {
+                NUM_MULTI_FETCH_OVERLAPPED_VERSIONS
+                    .with_label_values(&[SERVICE_TYPE, "partial"])
+                    .inc_by(prev_end - start_version + 1);
+                tracing::debug!(
+                    batch_first_version = first_version,
+                    batch_last_version = last_version,
+                    start_version = start_version,
+                    end_version = end_version,
+                    prev_start = ?prev_start,
+                    prev_end = prev_end,
+                    "[Filestore] Overlapping version data"
+                );
+                batch.drain(0..(prev_end - start_version + 1) as usize);
+                start_version = batch.first().unwrap().version;
+            }
+
+            // Otherwise there is a gap
+            if prev_end + 1 != start_version {
+                NUM_MULTI_FETCH_OVERLAPPED_VERSIONS
+                    .with_label_values(&[SERVICE_TYPE, "gap"])
+                    .inc_by(prev_end - start_version + 1);
+
+                tracing::error!(
+                    batch_first_version = first_version,
+                    batch_last_version = last_version,
+                    start_version = start_version,
+                    end_version = end_version,
+                    prev_start = ?prev_start,
+                    prev_end = prev_end,
+                    "[Filestore] Gaps or dupes in processing version data"
+                );
+                panic!("[Filestore] Gaps in processing data batch_first_version: {}, batch_last_version: {}, start_version: {}, end_version: {}, prev_start: {:?}, prev_end: {:?}",
+                    first_version,
+                    last_version,
+                    start_version,
+                    end_version,
+                    prev_start,
+                    prev_end,
+                );
+            }
+        }
+
+        prev_start = Some(start_version);
+        prev_end = Some(end_version);
+        transactions.extend(batch);
+    }
+
+    transactions
+}
+
+/// Builds the response for the get transactions request. Partial batch is ok, i.e., a
+/// batch with transactions < 1000.
+///
+/// It also returns the number of txns that were stripped.
+fn get_transactions_responses_builder( + transactions: Vec, + chain_id: u32, + txns_to_strip_filter: &BooleanTransactionFilter, +) -> (Vec, usize) { + let (stripped_transactions, num_stripped) = + strip_transactions(transactions, txns_to_strip_filter); + let chunks = chunk_transactions(stripped_transactions, MESSAGE_SIZE_LIMIT); + let responses = chunks + .into_iter() + .map(|chunk| TransactionsResponse { + chain_id: Some(chain_id as u64), + transactions: chunk, + }) + .collect(); + (responses, num_stripped) +} + +// This is a CPU bound operation, so we spawn_blocking +async fn deserialize_cached_transactions( + transactions: Vec>, + storage_format: StorageFormat, +) -> anyhow::Result> { + let task = tokio::task::spawn_blocking(move || { + transactions + .into_iter() + .map(|transaction| { + let cache_entry = CacheEntry::new(transaction, storage_format); + cache_entry.into_transaction() + }) + .collect::>() + }) + .await; + task.context("Transaction bytes to CacheEntry deserialization task failed") +} + +/// Fetches data from cache or the file store. It returns the data if it is ready in the cache or file store. +/// Otherwise, it returns the status of the data fetching. +async fn data_fetch( + starting_version: u64, + cache_operator: &mut CacheOperator, + file_store_operator: Arc>, + request_metadata: Arc, + storage_format: StorageFormat, +) -> anyhow::Result { + let current_batch_start_time = std::time::Instant::now(); + let batch_get_result = cache_operator + .batch_get_encoded_proto_data(starting_version) + .await; + + match batch_get_result { + // Data is not ready yet in the cache. + Ok(CacheBatchGetStatus::NotReady) => Ok(TransactionsDataStatus::AheadOfCache), + Ok(CacheBatchGetStatus::Ok(transactions)) => { + let decoding_start_time = std::time::Instant::now(); + let size_in_bytes = transactions + .iter() + .map(|transaction| transaction.len()) + .sum::(); + let num_of_transactions = transactions.len(); + let duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(); + + let transactions = + deserialize_cached_transactions(transactions, storage_format).await?; + let start_version_timestamp = transactions.first().unwrap().timestamp.as_ref(); + let end_version_timestamp = transactions.last().unwrap().timestamp.as_ref(); + + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceDataFetchedCache, + Some(starting_version as i64), + Some(starting_version as i64 + num_of_transactions as i64 - 1), + start_version_timestamp, + end_version_timestamp, + Some(duration_in_secs), + Some(size_in_bytes), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceTxnsDecoded, + Some(starting_version as i64), + Some(starting_version as i64 + num_of_transactions as i64 - 1), + start_version_timestamp, + end_version_timestamp, + Some(decoding_start_time.elapsed().as_secs_f64()), + Some(size_in_bytes), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + + Ok(TransactionsDataStatus::Success(transactions)) + }, + Ok(CacheBatchGetStatus::EvictedFromCache) => { + let transactions = + data_fetch_from_filestore(starting_version, file_store_operator, request_metadata) + .await?; + Ok(TransactionsDataStatus::Success(transactions)) + }, + Err(e) => Err(e), + } +} + +async fn data_fetch_from_filestore( + starting_version: u64, + file_store_operator: Arc>, + request_metadata: Arc, +) -> anyhow::Result> { + // Data is evicted from the cache. Fetch from file store. 
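get_transactions_responses_builder above relies on chunk_transactions (from aptos-indexer-grpc-utils) to keep every TransactionsResponse under MESSAGE_SIZE_LIMIT. A simplified greedy chunker conveys the idea; this is a sketch for intuition only, assuming encoded_len as the size measure, not the library's actual implementation.

use aptos_protos::transaction::v1::Transaction;
use prost::Message;

// Greedy size-capped chunking: close the current chunk once adding the next
// transaction would exceed the limit. An oversized single transaction still
// becomes its own chunk rather than being dropped.
fn chunk_by_size(txns: Vec<Transaction>, limit: usize) -> Vec<Vec<Transaction>> {
    let mut chunks = vec![];
    let mut current = vec![];
    let mut current_size = 0;
    for txn in txns {
        let size = txn.encoded_len();
        if !current.is_empty() && current_size + size > limit {
            chunks.push(std::mem::take(&mut current));
            current_size = 0;
        }
        current_size += size;
        current.push(txn);
    }
    if !current.is_empty() {
        chunks.push(current);
    }
    chunks
}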
+ let (transactions, io_duration, decoding_duration) = file_store_operator + .get_transactions_with_durations(starting_version, NUM_DATA_FETCH_RETRIES) + .await?; + let size_in_bytes = transactions + .iter() + .map(|transaction| transaction.encoded_len()) + .sum::(); + let num_of_transactions = transactions.len(); + let start_version_timestamp = transactions.first().unwrap().timestamp.as_ref(); + let end_version_timestamp = transactions.last().unwrap().timestamp.as_ref(); + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceDataFetchedFilestore, + Some(starting_version as i64), + Some(starting_version as i64 + num_of_transactions as i64 - 1), + start_version_timestamp, + end_version_timestamp, + Some(io_duration), + Some(size_in_bytes), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceTxnsDecoded, + Some(starting_version as i64), + Some(starting_version as i64 + num_of_transactions as i64 - 1), + start_version_timestamp, + end_version_timestamp, + Some(decoding_duration), + Some(size_in_bytes), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + Ok(transactions) +} + +/// Handles the case when the data is not ready in the cache, i.e., beyond the current head. +async fn ahead_of_cache_data_handling() { + // TODO: add exponential backoff. + tokio::time::sleep(Duration::from_millis( + AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS, + )) + .await; +} + +/// Handles data fetch errors, including cache and file store related errors. +async fn data_fetch_error_handling(err: anyhow::Error, current_version: u64, chain_id: u64) { + error!( + chain_id = chain_id, + current_version = current_version, + "[Data Service] Failed to fetch data from cache and file store. {:?}", + err + ); + tokio::time::sleep(Duration::from_millis( + TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS, + )) + .await; +} + +/// Gets the request metadata. Useful for logging. +fn get_request_metadata( + req: &Request, +) -> tonic::Result { + let request_metadata_pairs = vec![ + ( + "request_identifier_type", + REQUEST_HEADER_APTOS_IDENTIFIER_TYPE, + ), + ("request_identifier", REQUEST_HEADER_APTOS_IDENTIFIER), + ("request_email", REQUEST_HEADER_APTOS_EMAIL), + ( + "request_application_name", + REQUEST_HEADER_APTOS_APPLICATION_NAME, + ), + ("request_token", GRPC_AUTH_TOKEN_HEADER), + ("processor_name", GRPC_REQUEST_NAME_HEADER), + ]; + let mut request_metadata_map: HashMap = request_metadata_pairs + .into_iter() + .map(|(key, value)| { + ( + key.to_string(), + req.metadata() + .get(value) + .map(|value| value.to_str().unwrap_or("unspecified").to_string()) + .unwrap_or("unspecified".to_string()), + ) + }) + .collect(); + request_metadata_map.insert( + "request_connection_id".to_string(), + Uuid::new_v4().to_string(), + ); + let request_metadata: IndexerGrpcRequestMetadata = + serde_json::from_str(&serde_json::to_string(&request_metadata_map).unwrap()).unwrap(); + // TODO: update the request name if these are internal requests. 
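The get_request_metadata function above turns gRPC header values into a typed IndexerGrpcRequestMetadata by serializing a HashMap to JSON and deserializing it back. A self-contained sketch of the same pattern, using a hypothetical two-field struct (the real type has more fields):

use serde::Deserialize;
use std::collections::HashMap;

#[derive(Debug, Deserialize)]
struct RequestMetadataSketch {
    request_identifier: String,
    processor_name: String,
}

fn main() -> anyhow::Result<()> {
    let mut map = HashMap::new();
    map.insert("request_identifier".to_string(), "unspecified".to_string());
    map.insert("processor_name".to_string(), "default_processor".to_string());
    // Round-trip through a JSON string, as the code above does.
    let metadata: RequestMetadataSketch =
        serde_json::from_str(&serde_json::to_string(&map)?)?;
    println!("{metadata:?}");
    Ok(())
}

serde_json::from_value(serde_json::to_value(map)?) would skip the intermediate string; the string round-trip here simply mirrors the patch.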
+ Ok(request_metadata) +} + +async fn channel_send_multiple_with_timeout( + resp_items: Vec, + tx: tokio::sync::mpsc::Sender>, + request_metadata: Arc, +) -> Result<(), SendTimeoutError>> { + let overall_send_start_time = Instant::now(); + let overall_size_in_bytes = resp_items + .iter() + .map(|resp_item| resp_item.encoded_len()) + .sum::(); + let overall_start_txn = resp_items.first().unwrap().transactions.first().unwrap(); + let overall_end_txn = resp_items.last().unwrap().transactions.last().unwrap(); + let overall_start_version = overall_start_txn.version; + let overall_end_version = overall_end_txn.version; + let overall_start_txn_timestamp = overall_start_txn.clone().timestamp; + let overall_end_txn_timestamp = overall_end_txn.clone().timestamp; + + for resp_item in resp_items { + let send_start_time = Instant::now(); + let response_size = resp_item.encoded_len(); + let num_of_transactions = resp_item.transactions.len(); + let start_version = resp_item.transactions.first().unwrap().version; + let end_version = resp_item.transactions.last().unwrap().version; + let start_version_txn_timestamp = resp_item + .transactions + .first() + .unwrap() + .timestamp + .as_ref() + .unwrap(); + let end_version_txn_timestamp = resp_item + .transactions + .last() + .unwrap() + .timestamp + .as_ref() + .unwrap(); + + tx.send_timeout( + Result::::Ok(resp_item.clone()), + RESPONSE_CHANNEL_SEND_TIMEOUT, + ) + .await?; + + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceChunkSent, + Some(start_version as i64), + Some(end_version as i64), + Some(start_version_txn_timestamp), + Some(end_version_txn_timestamp), + Some(send_start_time.elapsed().as_secs_f64()), + Some(response_size), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + } + + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceAllChunksSent, + Some(overall_start_version as i64), + Some(overall_end_version as i64), + overall_start_txn_timestamp.as_ref(), + overall_end_txn_timestamp.as_ref(), + Some(overall_send_start_time.elapsed().as_secs_f64()), + Some(overall_size_in_bytes), + Some((overall_end_version - overall_start_version + 1) as i64), + Some(&request_metadata), + ); + + Ok(()) +} + +/// This function strips transactions that match the given filter. Stripping means we +/// remove the payload, signature, events, and writesets. Note, the filter can be +/// composed of many conditions, see `BooleanTransactionFilter` for more. +/// +/// This returns the mutated txns and the number of txns that were stripped. +fn strip_transactions( + transactions: Vec, + txns_to_strip_filter: &BooleanTransactionFilter, +) -> (Vec, usize) { + let mut stripped_count = 0; + + let stripped_transactions: Vec = transactions + .into_iter() + .map(|mut txn| { + // Note: `is_allowed` means the txn matches the filter, in which case + // we strip it. + if txns_to_strip_filter.is_allowed(&txn) { + stripped_count += 1; + if let Some(info) = txn.info.as_mut() { + info.changes = vec![]; + } + if let Some(TxnData::User(user_transaction)) = txn.txn_data.as_mut() { + user_transaction.events = vec![]; + if let Some(utr) = user_transaction.request.as_mut() { + // Wipe the payload and signature. 
+ utr.payload = None; + utr.signature = None; + } + } + } + txn + }) + .collect(); + + (stripped_transactions, stripped_count) +} + +#[cfg(test)] +mod tests { + use super::*; + use aptos_protos::transaction::v1::{ + transaction::TxnData, transaction_payload::Payload, EntryFunctionId, EntryFunctionPayload, + Event, MoveModuleId, Signature, Transaction, TransactionInfo, TransactionPayload, + UserTransaction, UserTransactionRequest, WriteSetChange, + }; + use aptos_transaction_filter::{ + boolean_transaction_filter::APIFilter, filters::UserTransactionFilterBuilder, + EntryFunctionFilterBuilder, UserTransactionPayloadFilterBuilder, + }; + + fn create_test_transaction( + module_address: String, + module_name: String, + function_name: String, + ) -> Transaction { + Transaction { + version: 1, + txn_data: Some(TxnData::User(UserTransaction { + request: Some(UserTransactionRequest { + payload: Some(TransactionPayload { + r#type: 1, + payload: Some(Payload::EntryFunctionPayload(EntryFunctionPayload { + function: Some(EntryFunctionId { + module: Some(MoveModuleId { + address: module_address, + name: module_name, + }), + name: function_name, + }), + ..Default::default() + })), + }), + signature: Some(Signature::default()), + ..Default::default() + }), + events: vec![Event::default()], + })), + info: Some(TransactionInfo { + changes: vec![WriteSetChange::default()], + ..Default::default() + }), + ..Default::default() + } + } + + #[test] + fn test_ensure_sequential_transactions_merges_and_sorts() { + let transactions1 = (1..5) + .map(|i| Transaction { + version: i, + ..Default::default() + }) + .collect(); + let transactions2 = (5..10) + .map(|i| Transaction { + version: i, + ..Default::default() + }) + .collect(); + // No overlap, just normal fetching flow + let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); + assert_eq!(transactions1.len(), 9); + assert_eq!(transactions1.first().unwrap().version, 1); + assert_eq!(transactions1.last().unwrap().version, 9); + + // This is a full overlap + let transactions2 = (5..7) + .map(|i| Transaction { + version: i, + ..Default::default() + }) + .collect(); + let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); + assert_eq!(transactions1.len(), 9); + assert_eq!(transactions1.first().unwrap().version, 1); + assert_eq!(transactions1.last().unwrap().version, 9); + + // Partial overlap + let transactions2 = (5..12) + .map(|i| Transaction { + version: i, + ..Default::default() + }) + .collect(); + let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); + assert_eq!(transactions1.len(), 11); + assert_eq!(transactions1.first().unwrap().version, 1); + assert_eq!(transactions1.last().unwrap().version, 11); + } + + const MODULE_ADDRESS: &str = "0x1234"; + const MODULE_NAME: &str = "module"; + const FUNCTION_NAME: &str = "function"; + + #[test] + fn test_transactions_are_stripped_correctly_sender_addresses() { + let sender_address = "0x1234".to_string(); + // Create a transaction with a user transaction + let txn = Transaction { + version: 1, + txn_data: Some(TxnData::User(UserTransaction { + request: Some(UserTransactionRequest { + sender: sender_address.clone(), + payload: Some(TransactionPayload::default()), + signature: Some(Signature::default()), + ..Default::default() + }), + events: vec![Event::default()], + })), + info: Some(TransactionInfo { + changes: vec![WriteSetChange::default()], + ..Default::default() + }), + ..Default::default() + }; + + // Create filter for 
senders to ignore. + let sender_filters = vec![sender_address] + .into_iter() + .map(|address| { + BooleanTransactionFilter::from(APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .sender(address) + .build() + .unwrap(), + )) + }) + .collect(); + let filter = BooleanTransactionFilter::new_or(sender_filters); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn], &filter); + assert_eq!(num_stripped, 1); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); + assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); + assert_eq!(user_transaction.events.len(), 0); + assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + } + + #[test] + fn test_transactions_are_stripped_correctly_module_address() { + let txn = create_test_transaction( + MODULE_ADDRESS.to_string(), + MODULE_NAME.to_string(), + FUNCTION_NAME.to_string(), + ); + // Testing filter with only address set + let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( + APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .payload( + UserTransactionPayloadFilterBuilder::default() + .function( + EntryFunctionFilterBuilder::default() + .address(MODULE_ADDRESS.to_string()) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ), + )]); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); + assert_eq!(num_stripped, 1); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); + assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); + assert_eq!(user_transaction.events.len(), 0); + assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + } + + #[test] + fn test_transactions_are_stripped_correctly_module_name() { + let txn = create_test_transaction( + MODULE_ADDRESS.to_string(), + MODULE_NAME.to_string(), + FUNCTION_NAME.to_string(), + ); + // Testing filter with only module set + let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( + APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .payload( + UserTransactionPayloadFilterBuilder::default() + .function( + EntryFunctionFilterBuilder::default() + .module(MODULE_NAME.to_string()) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ), + )]); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); + assert_eq!(num_stripped, 1); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); + assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); + assert_eq!(user_transaction.events.len(), 0); + assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + } + + #[test] + fn test_transactions_are_stripped_correctly_function_name() { + 
let txn = create_test_transaction( + MODULE_ADDRESS.to_string(), + MODULE_NAME.to_string(), + FUNCTION_NAME.to_string(), + ); + // Testing filter with only function set + let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( + APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .payload( + UserTransactionPayloadFilterBuilder::default() + .function( + EntryFunctionFilterBuilder::default() + .function(FUNCTION_NAME.to_string()) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ), + )]); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); + assert_eq!(num_stripped, 1); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); + assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); + assert_eq!(user_transaction.events.len(), 0); + assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + } + #[test] + fn test_transactions_are_not_stripped() { + let txn = create_test_transaction( + MODULE_ADDRESS.to_string(), + MODULE_NAME.to_string(), + FUNCTION_NAME.to_string(), + ); + // Testing filter with wrong filter + let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( + APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .payload( + UserTransactionPayloadFilterBuilder::default() + .function( + EntryFunctionFilterBuilder::default() + .function("0xrandom".to_string()) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ), + )]); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); + assert_eq!(num_stripped, 0); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_ne!(user_transaction.request.as_ref().unwrap().payload, None); + assert_ne!(user_transaction.request.as_ref().unwrap().signature, None); + assert_ne!(user_transaction.events.len(), 0); + assert_ne!(txn.info.as_ref().unwrap().changes.len(), 0); + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs index f2faf42408631..0629f88f4d94b 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs @@ -1,50 +1,24 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::metrics::{ - BYTES_READY_TO_TRANSFER_FROM_SERVER, BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING, - CONNECTION_COUNT, ERROR_COUNT, LATEST_PROCESSED_VERSION_PER_PROCESSOR, - NUM_TRANSACTIONS_STRIPPED, PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR, - PROCESSED_VERSIONS_COUNT_PER_PROCESSOR, SHORT_CONNECTION_COUNT, -}; use anyhow::{Context, Result}; use aptos_indexer_grpc_utils::{ - cache_operator::{CacheBatchGetStatus, CacheCoverageStatus, CacheOperator}, chunk_transactions, compression_util::{CacheEntry, StorageFormat}, - config::IndexerGrpcFileStoreConfig, - constants::{ - IndexerGrpcRequestMetadata, GRPC_AUTH_TOKEN_HEADER, GRPC_REQUEST_NAME_HEADER, - MESSAGE_SIZE_LIMIT, 
REQUEST_HEADER_APTOS_APPLICATION_NAME, REQUEST_HEADER_APTOS_EMAIL, - REQUEST_HEADER_APTOS_IDENTIFIER, REQUEST_HEADER_APTOS_IDENTIFIER_TYPE, - }, - counters::{log_grpc_step, IndexerGrpcStep, NUM_MULTI_FETCH_OVERLAPPED_VERSIONS}, + constants::MESSAGE_SIZE_LIMIT, file_store_operator::FileStoreOperator, - in_memory_cache::InMemoryCache, - time_diff_since_pb_timestamp_in_secs, - types::RedisUrl, }; use aptos_moving_average::MovingAverage; use aptos_protos::{ indexer::v1::{raw_data_server::RawData, GetTransactionsRequest, TransactionsResponse}, - transaction::v1::{transaction::TxnData, Transaction}, + transaction::v1::Transaction, }; -use aptos_transaction_filter::{BooleanTransactionFilter, Filterable}; use futures::Stream; -use prost::Message; -use redis::Client; -use std::{ - collections::HashMap, - pin::Pin, - str::FromStr, - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::sync::mpsc::{channel, error::SendTimeoutError}; +use std::{pin::Pin, sync::Arc, time::Duration}; +use tokio::sync::mpsc::{channel, error::SendTimeoutError, Sender}; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status}; -use tracing::{error, info, warn}; -use uuid::Uuid; +use tracing::{error, warn}; type ResponseStream = Pin> + Send>>; @@ -52,7 +26,6 @@ const MOVING_AVERAGE_WINDOW_SIZE: u64 = 10_000; // When trying to fetch beyond the current head of cache, the server will retry after this duration. const AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS: u64 = 50; // When error happens when fetching data from cache and file store, the server will retry after this duration. -// TODO(larry): fix all errors treated as transient errors. const TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS: u64 = 1000; // This is the time we wait for the file store to be ready. It should only be // kicked off when there's no metadata in the file store. @@ -62,11 +35,6 @@ const FILE_STORE_METADATA_WAIT_MS: u64 = 2000; // This is to prevent the server from being occupied by a slow client. 
const RESPONSE_CHANNEL_SEND_TIMEOUT: Duration = Duration::from_secs(120); -const SHORT_CONNECTION_DURATION_IN_SECS: u64 = 10; - -const RESPONSE_HEADER_APTOS_CONNECTION_ID_HEADER: &str = "x-aptos-connection-id"; -const SERVICE_TYPE: &str = "data_service"; - // Number of times to retry fetching a given txn block from the stores pub const NUM_DATA_FETCH_RETRIES: u8 = 5; @@ -76,624 +44,57 @@ const MAX_FETCH_TASKS_PER_REQUEST: u64 = 5; const TRANSACTIONS_PER_STORAGE_BLOCK: u64 = 1000; pub struct RawDataServerWrapper { - pub redis_client: Arc, - pub file_store_config: IndexerGrpcFileStoreConfig, + handler_tx: Sender<( + GetTransactionsRequest, + Sender>, + )>, pub data_service_response_channel_size: usize, - pub txns_to_strip_filter: BooleanTransactionFilter, - pub cache_storage_format: StorageFormat, - in_memory_cache: Arc, -} - -// Exclude in_memory-cache -impl std::fmt::Debug for RawDataServerWrapper { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("RawDataServerWrapper") - .field("redis_client", &"Arc") - .field("file_store_config", &self.file_store_config) - .field( - "data_service_response_channel_size", - &self.data_service_response_channel_size, - ) - .field("txns_to_strip_filter", &self.txns_to_strip_filter) - .field("cache_storage_format", &self.cache_storage_format) - .finish() - } } impl RawDataServerWrapper { pub fn new( - redis_address: RedisUrl, - file_store_config: IndexerGrpcFileStoreConfig, + handler_tx: Sender<( + GetTransactionsRequest, + Sender>, + )>, data_service_response_channel_size: usize, - txns_to_strip_filter: BooleanTransactionFilter, - cache_storage_format: StorageFormat, - in_memory_cache: Arc, ) -> anyhow::Result { Ok(Self { - redis_client: Arc::new( - redis::Client::open(redis_address.0.clone()).with_context(|| { - format!("Failed to create redis client for {}", redis_address) - })?, - ), - file_store_config, + handler_tx, data_service_response_channel_size, - txns_to_strip_filter, - cache_storage_format, - in_memory_cache, }) } } -/// Enum to represent the status of the data fetching overall. -enum TransactionsDataStatus { - // Data fetching is successful. - Success(Vec), - // Ahead of current head of cache. - AheadOfCache, -} - -/// RawDataServerWrapper handles the get transactions requests from cache and file store. #[tonic::async_trait] impl RawData for RawDataServerWrapper { type GetTransactionsStream = ResponseStream; - /// GetTransactionsStream is a streaming GRPC endpoint: - /// 1. Fetches data from cache and file store. - /// 1.1. If the data is beyond the current head of cache, retry after a short sleep. - /// 1.2. If the data is not in cache, fetch the data from file store. - /// 1.3. If the data is not in file store, stream connection will break. - /// 1.4 If error happens, retry after a short sleep. - /// 2. Push data into channel to stream to the client. - /// 2.1. If the channel is full, do not fetch and retry after a short sleep. async fn get_transactions( &self, req: Request, ) -> Result, Status> { // Get request identity. The request is already authenticated by the interceptor. 
- let request_metadata = match get_request_metadata(&req) { - Ok(request_metadata) => request_metadata, - _ => return Result::Err(Status::aborted("Invalid request token")), - }; - CONNECTION_COUNT - .with_label_values(&request_metadata.get_label_values()) - .inc(); let request = req.into_inner(); - let transactions_count = request.transactions_count; + tracing::info!("Request: {request:?}."); // Response channel to stream the data to the client. let (tx, rx) = channel(self.data_service_response_channel_size); - let current_version = match &request.starting_version { - Some(version) => *version, - // Live mode if starting version isn't specified - None => self - .in_memory_cache - .latest_version() - .await - .saturating_sub(1), - }; - - let file_store_operator: Box = self.file_store_config.create(); - let file_store_operator = Arc::new(file_store_operator); - - // Adds tracing context for the request. - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceNewRequestReceived, - Some(current_version as i64), - transactions_count.map(|v| (v as i64 + current_version as i64 - 1)), - None, - None, - None, - None, - None, - Some(&request_metadata), - ); - - let redis_client = self.redis_client.clone(); - let cache_storage_format = self.cache_storage_format; - let request_metadata = Arc::new(request_metadata); - let txns_to_strip_filter = self.txns_to_strip_filter.clone(); - let in_memory_cache = self.in_memory_cache.clone(); - tokio::spawn({ - let request_metadata = request_metadata.clone(); - async move { - data_fetcher_task( - redis_client, - file_store_operator, - cache_storage_format, - request_metadata, - transactions_count, - tx, - txns_to_strip_filter, - current_version, - in_memory_cache, - ) - .await; - } - }); + self.handler_tx.send((request, tx)).await.unwrap(); let output_stream = ReceiverStream::new(rx); - let mut response = Response::new(Box::pin(output_stream) as Self::GetTransactionsStream); + let response = Response::new(Box::pin(output_stream) as Self::GetTransactionsStream); - response.metadata_mut().insert( - RESPONSE_HEADER_APTOS_CONNECTION_ID_HEADER, - tonic::metadata::MetadataValue::from_str(&request_metadata.request_connection_id) - .unwrap(), - ); Ok(response) } } -enum DataFetchSubTaskResult { - BatchSuccess(Vec>), - Success(Vec), - NoResults, -} - -async fn get_data_with_tasks( - start_version: u64, - transactions_count: Option, - chain_id: u64, - cache_operator: &mut CacheOperator, - file_store_operator: Arc>, - request_metadata: Arc, - cache_storage_format: StorageFormat, - in_memory_cache: Arc, -) -> DataFetchSubTaskResult { - let start_time = Instant::now(); - let in_memory_transactions = in_memory_cache.get_transactions(start_version).await; - if !in_memory_transactions.is_empty() { - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceFetchingDataFromInMemoryCache, - Some(start_version as i64), - Some(in_memory_transactions.last().as_ref().unwrap().version as i64), - None, - None, - Some(start_time.elapsed().as_secs_f64()), - None, - Some(in_memory_transactions.len() as i64), - Some(&request_metadata), - ); - return DataFetchSubTaskResult::BatchSuccess(chunk_transactions( - in_memory_transactions, - MESSAGE_SIZE_LIMIT, - )); - } - let cache_coverage_status = cache_operator - .check_cache_coverage_status(start_version) - .await; - - let num_tasks_to_use = match cache_coverage_status { - Ok(CacheCoverageStatus::DataNotReady) => return DataFetchSubTaskResult::NoResults, - Ok(CacheCoverageStatus::CacheHit(_)) => 1, - 
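The rewritten get_transactions above is now a thin shim: it forwards the request plus a per-stream response sender over handler_tx and hands the receiver back to tonic as the stream. A sketch of what the consuming end of that channel presumably looks like; the actual loop lives in the new data_service.rs, which is not shown in this hunk, so the names and shape here are assumptions.

use aptos_protos::indexer::v1::{GetTransactionsRequest, TransactionsResponse};
use tokio::sync::mpsc::{Receiver, Sender};
use tonic::Status;

// Central worker owning all data-fetching state; the gRPC handler stays thin.
async fn handler_loop(
    mut handler_rx: Receiver<(
        GetTransactionsRequest,
        Sender<Result<TransactionsResponse, Status>>,
    )>,
) {
    while let Some((request, response_tx)) = handler_rx.recv().await {
        // Spawn one task per stream so a slow client cannot block the loop.
        tokio::spawn(async move {
            let _ = (request, response_tx); // fetch data and push chunks here
        });
    }
}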
Ok(CacheCoverageStatus::CacheEvicted) => match transactions_count { - None => MAX_FETCH_TASKS_PER_REQUEST, - Some(transactions_count) => { - let num_tasks = transactions_count / TRANSACTIONS_PER_STORAGE_BLOCK; - if num_tasks >= MAX_FETCH_TASKS_PER_REQUEST { - // Limit the max tasks to MAX_FETCH_TASKS_PER_REQUEST - MAX_FETCH_TASKS_PER_REQUEST - } else if num_tasks < 1 { - // Limit the min tasks to 1 - 1 - } else { - num_tasks - } - }, - }, - Err(_) => { - error!("[Data Service] Failed to get cache coverage status."); - panic!("Failed to get cache coverage status."); - }, - }; - - let mut tasks = tokio::task::JoinSet::new(); - let mut current_version = start_version; - - for _ in 0..num_tasks_to_use { - tasks.spawn({ - // TODO: arc this instead of cloning - let mut cache_operator = cache_operator.clone(); - let file_store_operator = file_store_operator.clone(); - let request_metadata = request_metadata.clone(); - async move { - get_data_in_task( - current_version, - chain_id, - &mut cache_operator, - file_store_operator, - request_metadata.clone(), - cache_storage_format, - ) - .await - } - }); - // Storage is in block of 1000: we align our current version fetch to the nearest block - current_version += TRANSACTIONS_PER_STORAGE_BLOCK; - current_version -= current_version % TRANSACTIONS_PER_STORAGE_BLOCK; - } - - let mut transactions: Vec> = vec![]; - while let Some(result) = tasks.join_next().await { - match result { - Ok(DataFetchSubTaskResult::Success(txns)) => { - transactions.push(txns); - }, - Ok(DataFetchSubTaskResult::NoResults) => {}, - Err(e) => { - error!( - error = e.to_string(), - "[Data Service] Failed to get data from cache and file store." - ); - panic!("Failed to get data from cache and file store."); - }, - Ok(_) => unreachable!("Fetching from a single task will never return a batch"), - } - } - - if transactions.is_empty() { - DataFetchSubTaskResult::NoResults - } else { - DataFetchSubTaskResult::BatchSuccess(transactions) - } -} - -async fn get_data_in_task( - start_version: u64, - chain_id: u64, - cache_operator: &mut CacheOperator, - file_store_operator: Arc>, - request_metadata: Arc, - cache_storage_format: StorageFormat, -) -> DataFetchSubTaskResult { - let current_batch_start_time = std::time::Instant::now(); - - let fetched = data_fetch( - start_version, - cache_operator, - file_store_operator, - request_metadata.clone(), - cache_storage_format, - ); - - let transaction_data = match fetched.await { - Ok(TransactionsDataStatus::Success(transactions)) => transactions, - Ok(TransactionsDataStatus::AheadOfCache) => { - info!( - start_version = start_version, - request_identifier = request_metadata.request_identifier.as_str(), - processor_name = request_metadata.processor_name.as_str(), - connection_id = request_metadata.request_connection_id.as_str(), - duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(), - service_type = SERVICE_TYPE, - "[Data Service] Requested data is ahead of cache. Sleeping for {} ms.", - AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS, - ); - ahead_of_cache_data_handling().await; - // Retry after a short sleep. - return DataFetchSubTaskResult::NoResults; - }, - Err(e) => { - ERROR_COUNT.with_label_values(&["data_fetch_failed"]).inc(); - data_fetch_error_handling(e, start_version, chain_id).await; - // Retry after a short sleep. 
- return DataFetchSubTaskResult::NoResults; - }, - }; - DataFetchSubTaskResult::Success(transaction_data) -} - -// This is a task spawned off for servicing a users' request -async fn data_fetcher_task( - redis_client: Arc, - file_store_operator: Arc>, - cache_storage_format: StorageFormat, - request_metadata: Arc, - transactions_count: Option, - tx: tokio::sync::mpsc::Sender>, - txns_to_strip_filter: BooleanTransactionFilter, - mut current_version: u64, - in_memory_cache: Arc, -) { - let mut connection_start_time = Some(std::time::Instant::now()); - let mut transactions_count = transactions_count; - - // Establish redis connection - let conn = match redis_client.get_tokio_connection_manager().await { - Ok(conn) => conn, - Err(e) => { - ERROR_COUNT - .with_label_values(&["redis_connection_failed"]) - .inc(); - // Connection will be dropped anyway, so we ignore the error here. - let _result = tx - .send_timeout( - Err(Status::unavailable( - "[Data Service] Cannot connect to Redis; please retry.", - )), - RESPONSE_CHANNEL_SEND_TIMEOUT, - ) - .await; - error!( - error = e.to_string(), - "[Data Service] Failed to get redis connection." - ); - return; - }, - }; - let mut cache_operator = CacheOperator::new(conn, cache_storage_format); - - // Validate chain id - let mut metadata = file_store_operator.get_file_store_metadata().await; - while metadata.is_none() { - metadata = file_store_operator.get_file_store_metadata().await; - tracing::warn!( - "[File worker] File store metadata not found. Waiting for {} ms.", - FILE_STORE_METADATA_WAIT_MS - ); - tokio::time::sleep(std::time::Duration::from_millis( - FILE_STORE_METADATA_WAIT_MS, - )) - .await; - } - - let metadata_chain_id = metadata.unwrap().chain_id; - - // Validate redis chain id. Must be present by the time it gets here - let chain_id = match cache_operator.get_chain_id().await { - Ok(chain_id) => chain_id.unwrap(), - Err(e) => { - ERROR_COUNT - .with_label_values(&["redis_get_chain_id_failed"]) - .inc(); - // Connection will be dropped anyway, so we ignore the error here. - let _result = tx - .send_timeout( - Err(Status::unavailable( - "[Data Service] Cannot get the chain id from redis; please retry.", - )), - RESPONSE_CHANNEL_SEND_TIMEOUT, - ) - .await; - error!( - error = e.to_string(), - "[Data Service] Failed to get chain id from redis." - ); - return; - }, - }; - - if metadata_chain_id != chain_id { - let _result = tx - .send_timeout( - Err(Status::unavailable("[Data Service] Chain ID mismatch.")), - RESPONSE_CHANNEL_SEND_TIMEOUT, - ) - .await; - error!("[Data Service] Chain ID mismatch.",); - return; - } - - // Data service metrics. - let mut tps_calculator = MovingAverage::new(MOVING_AVERAGE_WINDOW_SIZE); - - loop { - // 1. Fetch data from cache and file store. - let transaction_data = match get_data_with_tasks( - current_version, - transactions_count, - chain_id, - &mut cache_operator, - file_store_operator.clone(), - request_metadata.clone(), - cache_storage_format, - in_memory_cache.clone(), - ) - .await - { - DataFetchSubTaskResult::BatchSuccess(txns) => txns, - DataFetchSubTaskResult::Success(_) => { - unreachable!("Fetching from multiple tasks will never return a single vector") - }, - DataFetchSubTaskResult::NoResults => continue, - }; - - let mut transaction_data = ensure_sequential_transactions(transaction_data); - - // TODO: Unify the truncation logic for start and end. - if let Some(count) = transactions_count { - if count == 0 { - // End the data stream. 
- // Since the client receives all the data it requested, we don't count it as a short connection. - connection_start_time = None; - break; - } else if (count as usize) < transaction_data.len() { - // Trim the data to the requested end version. - transaction_data.truncate(count as usize); - transactions_count = Some(0); - } else { - transactions_count = Some(count - transaction_data.len() as u64); - } - }; - // Note: this is the protobuf encoded transaction size. - let bytes_ready_to_transfer = transaction_data - .iter() - .map(|t| t.encoded_len()) - .sum::(); - BYTES_READY_TO_TRANSFER_FROM_SERVER - .with_label_values(&request_metadata.get_label_values()) - .inc_by(bytes_ready_to_transfer as u64); - // 2. Push the data to the response channel, i.e. stream the data to the client. - let current_batch_size = transaction_data.as_slice().len(); - let end_of_batch_version = transaction_data.as_slice().last().unwrap().version; - let (resp_items, num_stripped) = get_transactions_responses_builder( - transaction_data, - chain_id as u32, - &txns_to_strip_filter, - ); - NUM_TRANSACTIONS_STRIPPED - .with_label_values(&request_metadata.get_label_values()) - .inc_by(num_stripped as u64); - let bytes_ready_to_transfer_after_stripping = resp_items - .iter() - .flat_map(|response| &response.transactions) - .map(|t| t.encoded_len()) - .sum::(); - BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING - .with_label_values(&request_metadata.get_label_values()) - .inc_by(bytes_ready_to_transfer_after_stripping as u64); - let data_latency_in_secs = resp_items - .last() - .unwrap() - .transactions - .last() - .unwrap() - .timestamp - .as_ref() - .map(time_diff_since_pb_timestamp_in_secs); - - match channel_send_multiple_with_timeout(resp_items, tx.clone(), request_metadata.clone()) - .await - { - Ok(_) => { - // TODO: Reasses whether this metric is useful. - LATEST_PROCESSED_VERSION_PER_PROCESSOR - .with_label_values(&request_metadata.get_label_values()) - .set(end_of_batch_version as i64); - PROCESSED_VERSIONS_COUNT_PER_PROCESSOR - .with_label_values(&request_metadata.get_label_values()) - .inc_by(current_batch_size as u64); - if let Some(data_latency_in_secs) = data_latency_in_secs { - PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR - .with_label_values(&request_metadata.get_label_values()) - .set(data_latency_in_secs); - } - }, - Err(SendTimeoutError::Timeout(_)) => { - warn!("[Data Service] Receiver is full; exiting."); - break; - }, - Err(SendTimeoutError::Closed(_)) => { - warn!("[Data Service] Receiver is closed; exiting."); - break; - }, - } - // 3. Update the current version and record current tps. - tps_calculator.tick_now(current_batch_size as u64); - current_version = end_of_batch_version + 1; - } - info!( - request_identifier = request_metadata.request_identifier.as_str(), - processor_name = request_metadata.processor_name.as_str(), - connection_id = request_metadata.request_connection_id.as_str(), - service_type = SERVICE_TYPE, - "[Data Service] Client disconnected." - ); - if let Some(start_time) = connection_start_time { - if start_time.elapsed().as_secs() < SHORT_CONNECTION_DURATION_IN_SECS { - SHORT_CONNECTION_COUNT - .with_label_values(&request_metadata.get_label_values()) - .inc(); - } - } -} - -/// Takes in multiple batches of transactions, and: -/// 1. De-dupes in the case of overlap (but log to prom metric) -/// 2. 
Panics in cases of gaps -fn ensure_sequential_transactions(mut batches: Vec>) -> Vec { - // If there's only one, no sorting required - if batches.len() == 1 { - return batches.pop().unwrap(); - } - - // Sort by the first version per batch, ascending - batches.sort_by(|a, b| a.first().unwrap().version.cmp(&b.first().unwrap().version)); - let first_version = batches.first().unwrap().first().unwrap().version; - let last_version = batches.last().unwrap().last().unwrap().version; - let mut transactions: Vec = vec![]; - - let mut prev_start = None; - let mut prev_end = None; - for mut batch in batches { - let mut start_version = batch.first().unwrap().version; - let end_version = batch.last().unwrap().version; - if prev_start.is_some() { - let prev_start = prev_start.unwrap(); - let prev_end = prev_end.unwrap(); - // If this batch is fully contained within the previous batch, skip it - if prev_start <= start_version && prev_end >= end_version { - NUM_MULTI_FETCH_OVERLAPPED_VERSIONS - .with_label_values(&[SERVICE_TYPE, "full"]) - .inc_by(end_version - start_version); - continue; - } - // If this batch overlaps with the previous batch, combine them - if prev_end >= start_version { - NUM_MULTI_FETCH_OVERLAPPED_VERSIONS - .with_label_values(&[SERVICE_TYPE, "partial"]) - .inc_by(prev_end - start_version + 1); - tracing::debug!( - batch_first_version = first_version, - batch_last_version = last_version, - start_version = start_version, - end_version = end_version, - prev_start = ?prev_start, - prev_end = prev_end, - "[Filestore] Overlapping version data" - ); - batch.drain(0..(prev_end - start_version + 1) as usize); - start_version = batch.first().unwrap().version; - } - - // Otherwise there is a gap - if prev_end + 1 != start_version { - NUM_MULTI_FETCH_OVERLAPPED_VERSIONS - .with_label_values(&[SERVICE_TYPE, "gap"]) - .inc_by(prev_end - start_version + 1); - - tracing::error!( - batch_first_version = first_version, - batch_last_version = last_version, - start_version = start_version, - end_version = end_version, - prev_start = ?prev_start, - prev_end = prev_end, - "[Filestore] Gaps or dupes in processing version data" - ); - panic!("[Filestore] Gaps in processing data batch_first_version: {}, batch_last_version: {}, start_version: {}, end_version: {}, prev_start: {:?}, prev_end: {:?}", - first_version, - last_version, - start_version, - end_version, - prev_start, - prev_end, - ); - } - } - - prev_start = Some(start_version); - prev_end = Some(end_version); - transactions.extend(batch); - } - - transactions -} - -/// Builds the response for the get transactions request. Partial batch is ok, i.e., a -/// batch with transactions < 1000. -/// -/// It also returns the number of txns that were stripped. 
fn get_transactions_responses_builder( transactions: Vec, chain_id: u32, - txns_to_strip_filter: &BooleanTransactionFilter, -) -> (Vec, usize) { - let (stripped_transactions, num_stripped) = - strip_transactions(transactions, txns_to_strip_filter); - let chunks = chunk_transactions(stripped_transactions, MESSAGE_SIZE_LIMIT); +) -> Vec { + let chunks = chunk_transactions(transactions, MESSAGE_SIZE_LIMIT); let responses = chunks .into_iter() .map(|chunk| TransactionsResponse { @@ -701,7 +102,7 @@ fn get_transactions_responses_builder( transactions: chunk, }) .collect(); - (responses, num_stripped) + responses } // This is a CPU bound operation, so we spawn_blocking @@ -722,598 +123,17 @@ async fn deserialize_cached_transactions( task.context("Transaction bytes to CacheEntry deserialization task failed") } -/// Fetches data from cache or the file store. It returns the data if it is ready in the cache or file store. -/// Otherwise, it returns the status of the data fetching. -async fn data_fetch( - starting_version: u64, - cache_operator: &mut CacheOperator, - file_store_operator: Arc>, - request_metadata: Arc, - storage_format: StorageFormat, -) -> anyhow::Result { - let current_batch_start_time = std::time::Instant::now(); - let batch_get_result = cache_operator - .batch_get_encoded_proto_data(starting_version) - .await; - - match batch_get_result { - // Data is not ready yet in the cache. - Ok(CacheBatchGetStatus::NotReady) => Ok(TransactionsDataStatus::AheadOfCache), - Ok(CacheBatchGetStatus::Ok(transactions)) => { - let decoding_start_time = std::time::Instant::now(); - let size_in_bytes = transactions - .iter() - .map(|transaction| transaction.len()) - .sum::(); - let num_of_transactions = transactions.len(); - let duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(); - - let transactions = - deserialize_cached_transactions(transactions, storage_format).await?; - let start_version_timestamp = transactions.first().unwrap().timestamp.as_ref(); - let end_version_timestamp = transactions.last().unwrap().timestamp.as_ref(); - - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceDataFetchedCache, - Some(starting_version as i64), - Some(starting_version as i64 + num_of_transactions as i64 - 1), - start_version_timestamp, - end_version_timestamp, - Some(duration_in_secs), - Some(size_in_bytes), - Some(num_of_transactions as i64), - Some(&request_metadata), - ); - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceTxnsDecoded, - Some(starting_version as i64), - Some(starting_version as i64 + num_of_transactions as i64 - 1), - start_version_timestamp, - end_version_timestamp, - Some(decoding_start_time.elapsed().as_secs_f64()), - Some(size_in_bytes), - Some(num_of_transactions as i64), - Some(&request_metadata), - ); - - Ok(TransactionsDataStatus::Success(transactions)) - }, - Ok(CacheBatchGetStatus::EvictedFromCache) => { - let transactions = - data_fetch_from_filestore(starting_version, file_store_operator, request_metadata) - .await?; - Ok(TransactionsDataStatus::Success(transactions)) - }, - Err(e) => Err(e), - } -} - -async fn data_fetch_from_filestore( - starting_version: u64, - file_store_operator: Arc>, - request_metadata: Arc, -) -> anyhow::Result> { - // Data is evicted from the cache. Fetch from file store. 
-async fn data_fetch_from_filestore(
-    starting_version: u64,
-    file_store_operator: Arc<Box<dyn FileStoreOperator>>,
-    request_metadata: Arc<IndexerGrpcRequestMetadata>,
-) -> anyhow::Result<Vec<Transaction>> {
-    // Data is evicted from the cache. Fetch from file store.
-    let (transactions, io_duration, decoding_duration) = file_store_operator
-        .get_transactions_with_durations(starting_version, NUM_DATA_FETCH_RETRIES)
-        .await?;
-    let size_in_bytes = transactions
-        .iter()
-        .map(|transaction| transaction.encoded_len())
-        .sum::<usize>();
-    let num_of_transactions = transactions.len();
-    let start_version_timestamp = transactions.first().unwrap().timestamp.as_ref();
-    let end_version_timestamp = transactions.last().unwrap().timestamp.as_ref();
-    log_grpc_step(
-        SERVICE_TYPE,
-        IndexerGrpcStep::DataServiceDataFetchedFilestore,
-        Some(starting_version as i64),
-        Some(starting_version as i64 + num_of_transactions as i64 - 1),
-        start_version_timestamp,
-        end_version_timestamp,
-        Some(io_duration),
-        Some(size_in_bytes),
-        Some(num_of_transactions as i64),
-        Some(&request_metadata),
-    );
-    log_grpc_step(
-        SERVICE_TYPE,
-        IndexerGrpcStep::DataServiceTxnsDecoded,
-        Some(starting_version as i64),
-        Some(starting_version as i64 + num_of_transactions as i64 - 1),
-        start_version_timestamp,
-        end_version_timestamp,
-        Some(decoding_duration),
-        Some(size_in_bytes),
-        Some(num_of_transactions as i64),
-        Some(&request_metadata),
-    );
-    Ok(transactions)
-}
-
-/// Handles the case when the data is not ready in the cache, i.e., beyond the current head.
-async fn ahead_of_cache_data_handling() {
-    // TODO: add exponential backoff.
-    tokio::time::sleep(Duration::from_millis(
-        AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS,
-    ))
-    .await;
-}
-
-/// Handles data fetch errors, including cache and file store related errors.
-async fn data_fetch_error_handling(err: anyhow::Error, current_version: u64, chain_id: u64) {
-    error!(
-        chain_id = chain_id,
-        current_version = current_version,
-        "[Data Service] Failed to fetch data from cache and file store. {:?}",
-        err
-    );
-    tokio::time::sleep(Duration::from_millis(
-        TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS,
-    ))
-    .await;
-}
-
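The TODO in ahead_of_cache_data_handling asks for exponential backoff instead of the fixed sleep. One way to do it, as a minimal sketch with illustrative constants (not part of the patch):

    use std::time::Duration;

    /// Sleeps 50ms, 100ms, 200ms, ... capped at 5s, for the given retry attempt.
    async fn sleep_with_backoff(attempt: u32) {
        const BASE_MS: u64 = 50;
        const CAP_MS: u64 = 5_000;
        let delay_ms = BASE_MS.saturating_mul(1u64 << attempt.min(16)).min(CAP_MS);
        tokio::time::sleep(Duration::from_millis(delay_ms)).await;
    }

The caller would track the attempt count and reset it to zero as soon as a fetch succeeds.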
-/// Gets the request metadata. Useful for logging.
-fn get_request_metadata(
-    req: &Request<GetTransactionsRequest>,
-) -> tonic::Result<IndexerGrpcRequestMetadata> {
-    let request_metadata_pairs = vec![
-        (
-            "request_identifier_type",
-            REQUEST_HEADER_APTOS_IDENTIFIER_TYPE,
-        ),
-        ("request_identifier", REQUEST_HEADER_APTOS_IDENTIFIER),
-        ("request_email", REQUEST_HEADER_APTOS_EMAIL),
-        (
-            "request_application_name",
-            REQUEST_HEADER_APTOS_APPLICATION_NAME,
-        ),
-        ("request_token", GRPC_AUTH_TOKEN_HEADER),
-        ("processor_name", GRPC_REQUEST_NAME_HEADER),
-    ];
-    let mut request_metadata_map: HashMap<String, String> = request_metadata_pairs
-        .into_iter()
-        .map(|(key, value)| {
-            (
-                key.to_string(),
-                req.metadata()
-                    .get(value)
-                    .map(|value| value.to_str().unwrap_or("unspecified").to_string())
-                    .unwrap_or("unspecified".to_string()),
-            )
-        })
-        .collect();
-    request_metadata_map.insert(
-        "request_connection_id".to_string(),
-        Uuid::new_v4().to_string(),
-    );
-    let request_metadata: IndexerGrpcRequestMetadata =
-        serde_json::from_str(&serde_json::to_string(&request_metadata_map).unwrap()).unwrap();
-    // TODO: update the request name if these are internal requests.
-    Ok(request_metadata)
-}
-
 async fn channel_send_multiple_with_timeout(
     resp_items: Vec<TransactionsResponse>,
     tx: tokio::sync::mpsc::Sender<Result<TransactionsResponse, Status>>,
-    request_metadata: Arc<IndexerGrpcRequestMetadata>,
 ) -> Result<(), SendTimeoutError<Result<TransactionsResponse, Status>>> {
-    let overall_send_start_time = Instant::now();
-    let overall_size_in_bytes = resp_items
-        .iter()
-        .map(|resp_item| resp_item.encoded_len())
-        .sum::<usize>();
-    let overall_start_txn = resp_items.first().unwrap().transactions.first().unwrap();
-    let overall_end_txn = resp_items.last().unwrap().transactions.last().unwrap();
-    let overall_start_version = overall_start_txn.version;
-    let overall_end_version = overall_end_txn.version;
-    let overall_start_txn_timestamp = overall_start_txn.clone().timestamp;
-    let overall_end_txn_timestamp = overall_end_txn.clone().timestamp;
     for resp_item in resp_items {
-        let send_start_time = Instant::now();
-        let response_size = resp_item.encoded_len();
-        let num_of_transactions = resp_item.transactions.len();
-        let start_version = resp_item.transactions.first().unwrap().version;
-        let end_version = resp_item.transactions.last().unwrap().version;
-        let start_version_txn_timestamp = resp_item
-            .transactions
-            .first()
-            .unwrap()
-            .timestamp
-            .as_ref()
-            .unwrap();
-        let end_version_txn_timestamp = resp_item
-            .transactions
-            .last()
-            .unwrap()
-            .timestamp
-            .as_ref()
-            .unwrap();
         tx.send_timeout(
             Result::<TransactionsResponse, Status>::Ok(resp_item.clone()),
             RESPONSE_CHANNEL_SEND_TIMEOUT,
         )
         .await?;
-
-        log_grpc_step(
-            SERVICE_TYPE,
-            IndexerGrpcStep::DataServiceChunkSent,
-            Some(start_version as i64),
-            Some(end_version as i64),
-            Some(start_version_txn_timestamp),
-            Some(end_version_txn_timestamp),
-            Some(send_start_time.elapsed().as_secs_f64()),
-            Some(response_size),
-            Some(num_of_transactions as i64),
-            Some(&request_metadata),
-        );
     }
-    log_grpc_step(
-        SERVICE_TYPE,
-        IndexerGrpcStep::DataServiceAllChunksSent,
-        Some(overall_start_version as i64),
-        Some(overall_end_version as i64),
-        overall_start_txn_timestamp.as_ref(),
-        overall_end_txn_timestamp.as_ref(),
-        Some(overall_send_start_time.elapsed().as_secs_f64()),
-        Some(overall_size_in_bytes),
-        Some((overall_end_version - overall_start_version + 1) as i64),
-        Some(&request_metadata),
-    );
-
     Ok(())
 }
-
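channel_send_multiple_with_timeout relies on tokio's bounded mpsc channel for backpressure: if the client stops draining the response stream, the channel fills up, send_timeout eventually returns SendTimeoutError::Timeout, and the server can drop the stream instead of buffering without bound. A self-contained sketch of that behavior (channel size and timeout are illustrative):

    use tokio::sync::mpsc;
    use tokio::time::Duration;

    #[tokio::main]
    async fn main() {
        // Small buffer and no active receiver, to force a timeout.
        let (tx, _rx) = mpsc::channel::<u64>(2);
        for i in 0..4u64 {
            match tx.send_timeout(i, Duration::from_millis(100)).await {
                Ok(()) => println!("sent {i}"),
                Err(e) => {
                    println!("receiver too slow, dropping stream: {e}");
                    break;
                },
            }
        }
    }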
-/// This function strips transactions that match the given filter. Stripping means we
-/// remove the payload, signature, events, and writesets. Note, the filter can be
-/// composed of many conditions, see `BooleanTransactionFilter` for more.
-///
-/// This returns the mutated txns and the number of txns that were stripped.
-fn strip_transactions(
-    transactions: Vec<Transaction>,
-    txns_to_strip_filter: &BooleanTransactionFilter,
-) -> (Vec<Transaction>, usize) {
-    let mut stripped_count = 0;
-
-    let stripped_transactions: Vec<Transaction> = transactions
-        .into_iter()
-        .map(|mut txn| {
-            // Note: `is_allowed` means the txn matches the filter, in which case
-            // we strip it.
-            if txns_to_strip_filter.is_allowed(&txn) {
-                stripped_count += 1;
-                if let Some(info) = txn.info.as_mut() {
-                    info.changes = vec![];
-                }
-                if let Some(TxnData::User(user_transaction)) = txn.txn_data.as_mut() {
-                    user_transaction.events = vec![];
-                    if let Some(utr) = user_transaction.request.as_mut() {
-                        // Wipe the payload and signature.
-                        utr.payload = None;
-                        utr.signature = None;
-                    }
-                }
-            }
-            txn
-        })
-        .collect();
-
-    (stripped_transactions, stripped_count)
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use aptos_protos::transaction::v1::{
-        transaction::TxnData, transaction_payload::Payload, EntryFunctionId, EntryFunctionPayload,
-        Event, MoveModuleId, Signature, Transaction, TransactionInfo, TransactionPayload,
-        UserTransaction, UserTransactionRequest, WriteSetChange,
-    };
-    use aptos_transaction_filter::{
-        boolean_transaction_filter::APIFilter, filters::UserTransactionFilterBuilder,
-        EntryFunctionFilterBuilder, UserTransactionPayloadFilterBuilder,
-    };
-
-    fn create_test_transaction(
-        module_address: String,
-        module_name: String,
-        function_name: String,
-    ) -> Transaction {
-        Transaction {
-            version: 1,
-            txn_data: Some(TxnData::User(UserTransaction {
-                request: Some(UserTransactionRequest {
-                    payload: Some(TransactionPayload {
-                        r#type: 1,
-                        payload: Some(Payload::EntryFunctionPayload(EntryFunctionPayload {
-                            function: Some(EntryFunctionId {
-                                module: Some(MoveModuleId {
-                                    address: module_address,
-                                    name: module_name,
-                                }),
-                                name: function_name,
-                            }),
-                            ..Default::default()
-                        })),
-                    }),
-                    signature: Some(Signature::default()),
-                    ..Default::default()
-                }),
-                events: vec![Event::default()],
-            })),
-            info: Some(TransactionInfo {
-                changes: vec![WriteSetChange::default()],
-                ..Default::default()
-            }),
-            ..Default::default()
-        }
-    }
-
-    #[test]
-    fn test_ensure_sequential_transactions_merges_and_sorts() {
-        let transactions1 = (1..5)
-            .map(|i| Transaction {
-                version: i,
-                ..Default::default()
-            })
-            .collect();
-        let transactions2 = (5..10)
-            .map(|i| Transaction {
-                version: i,
-                ..Default::default()
-            })
-            .collect();
-        // No overlap, just normal fetching flow
-        let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]);
-        assert_eq!(transactions1.len(), 9);
-        assert_eq!(transactions1.first().unwrap().version, 1);
-        assert_eq!(transactions1.last().unwrap().version, 9);
-
-        // This is a full overlap
-        let transactions2 = (5..7)
-            .map(|i| Transaction {
-                version: i,
-                ..Default::default()
-            })
-            .collect();
-        let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]);
-        assert_eq!(transactions1.len(), 9);
-        assert_eq!(transactions1.first().unwrap().version, 1);
-        assert_eq!(transactions1.last().unwrap().version, 9);
-
-        // Partial overlap
-        let transactions2 = (5..12)
-            .map(|i| Transaction {
-                version: i,
-                ..Default::default()
-            })
-            .collect();
-        let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]);
-        assert_eq!(transactions1.len(), 11);
-        assert_eq!(transactions1.first().unwrap().version, 1);
-        assert_eq!(transactions1.last().unwrap().version, 11);
-    }
-
-    const MODULE_ADDRESS: &str = "0x1234";
-    const MODULE_NAME: &str = "module";
-    const FUNCTION_NAME: &str = "function";
-
-    #[test]
-    fn test_transactions_are_stripped_correctly_sender_addresses() {
-        let sender_address = "0x1234".to_string();
-        // Create a transaction with a user transaction
-        let txn = Transaction {
-            version: 1,
-            txn_data: Some(TxnData::User(UserTransaction {
-                request: Some(UserTransactionRequest {
-                    sender: sender_address.clone(),
-                    payload: Some(TransactionPayload::default()),
-                    signature: Some(Signature::default()),
-                    ..Default::default()
-                }),
-                events: vec![Event::default()],
-            })),
-            info: Some(TransactionInfo {
-                changes: vec![WriteSetChange::default()],
-                ..Default::default()
-            }),
-            ..Default::default()
-        };
-
-        // Create filter for senders to ignore.
-        let sender_filters = vec![sender_address]
-            .into_iter()
-            .map(|address| {
-                BooleanTransactionFilter::from(APIFilter::UserTransactionFilter(
-                    UserTransactionFilterBuilder::default()
-                        .sender(address)
-                        .build()
-                        .unwrap(),
-                ))
-            })
-            .collect();
-        let filter = BooleanTransactionFilter::new_or(sender_filters);
-
-        let (filtered_txns, num_stripped) = strip_transactions(vec![txn], &filter);
-        assert_eq!(num_stripped, 1);
-        assert_eq!(filtered_txns.len(), 1);
-        let txn = filtered_txns.first().unwrap();
-        let user_transaction = match &txn.txn_data {
-            Some(TxnData::User(user_transaction)) => user_transaction,
-            _ => panic!("Expected user transaction"),
-        };
-        assert_eq!(user_transaction.request.as_ref().unwrap().payload, None);
-        assert_eq!(user_transaction.request.as_ref().unwrap().signature, None);
-        assert_eq!(user_transaction.events.len(), 0);
-        assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0);
-    }
-
-    #[test]
-    fn test_transactions_are_stripped_correctly_module_address() {
-        let txn = create_test_transaction(
-            MODULE_ADDRESS.to_string(),
-            MODULE_NAME.to_string(),
-            FUNCTION_NAME.to_string(),
-        );
-        // Testing filter with only address set
-        let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from(
-            APIFilter::UserTransactionFilter(
-                UserTransactionFilterBuilder::default()
-                    .payload(
-                        UserTransactionPayloadFilterBuilder::default()
-                            .function(
-                                EntryFunctionFilterBuilder::default()
-                                    .address(MODULE_ADDRESS.to_string())
-                                    .build()
-                                    .unwrap(),
-                            )
-                            .build()
-                            .unwrap(),
-                    )
-                    .build()
-                    .unwrap(),
-            ),
-        )]);
-
-        let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter);
-        assert_eq!(num_stripped, 1);
-        assert_eq!(filtered_txns.len(), 1);
-        let txn = filtered_txns.first().unwrap();
-        let user_transaction = match &txn.txn_data {
-            Some(TxnData::User(user_transaction)) => user_transaction,
-            _ => panic!("Expected user transaction"),
-        };
-        assert_eq!(user_transaction.request.as_ref().unwrap().payload, None);
-        assert_eq!(user_transaction.request.as_ref().unwrap().signature, None);
-        assert_eq!(user_transaction.events.len(), 0);
-        assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0);
-    }
-
-    #[test]
-    fn test_transactions_are_stripped_correctly_module_name() {
-        let txn = create_test_transaction(
-            MODULE_ADDRESS.to_string(),
-            MODULE_NAME.to_string(),
-            FUNCTION_NAME.to_string(),
-        );
-        // Testing filter with only module set
-        let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from(
-            APIFilter::UserTransactionFilter(
-                UserTransactionFilterBuilder::default()
-                    .payload(
-                        UserTransactionPayloadFilterBuilder::default()
-                            .function(
-                                EntryFunctionFilterBuilder::default()
-                                    .module(MODULE_NAME.to_string())
-                                    .build()
-                                    .unwrap(),
-                            )
-                            .build()
-                            .unwrap(),
-                    )
-                    .build()
-                    .unwrap(),
-            ),
-        )]);
-
-        let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter);
-        assert_eq!(num_stripped, 1);
-        assert_eq!(filtered_txns.len(), 1);
-        let txn = filtered_txns.first().unwrap();
-        let user_transaction = match &txn.txn_data {
-            Some(TxnData::User(user_transaction)) => user_transaction,
-            _ => panic!("Expected user transaction"),
-        };
-        assert_eq!(user_transaction.request.as_ref().unwrap().payload, None);
-        assert_eq!(user_transaction.request.as_ref().unwrap().signature, None);
-        assert_eq!(user_transaction.events.len(), 0);
-        assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0);
-    }
-
-    #[test]
-    fn test_transactions_are_stripped_correctly_function_name() {
-        let txn = create_test_transaction(
-            MODULE_ADDRESS.to_string(),
-            MODULE_NAME.to_string(),
-            FUNCTION_NAME.to_string(),
-        );
-        // Testing filter with only function set
-        let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from(
-            APIFilter::UserTransactionFilter(
-                UserTransactionFilterBuilder::default()
-                    .payload(
-                        UserTransactionPayloadFilterBuilder::default()
-                            .function(
-                                EntryFunctionFilterBuilder::default()
-                                    .function(FUNCTION_NAME.to_string())
-                                    .build()
-                                    .unwrap(),
-                            )
-                            .build()
-                            .unwrap(),
-                    )
-                    .build()
-                    .unwrap(),
-            ),
-        )]);
-
-        let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter);
-        assert_eq!(num_stripped, 1);
-        assert_eq!(filtered_txns.len(), 1);
-        let txn = filtered_txns.first().unwrap();
-        let user_transaction = match &txn.txn_data {
-            Some(TxnData::User(user_transaction)) => user_transaction,
-            _ => panic!("Expected user transaction"),
-        };
-        assert_eq!(user_transaction.request.as_ref().unwrap().payload, None);
-        assert_eq!(user_transaction.request.as_ref().unwrap().signature, None);
-        assert_eq!(user_transaction.events.len(), 0);
-        assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0);
-    }
-    #[test]
-    fn test_transactions_are_not_stripped() {
-        let txn = create_test_transaction(
-            MODULE_ADDRESS.to_string(),
-            MODULE_NAME.to_string(),
-            FUNCTION_NAME.to_string(),
-        );
-        // Testing filter with wrong filter
-        let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from(
-            APIFilter::UserTransactionFilter(
-                UserTransactionFilterBuilder::default()
-                    .payload(
-                        UserTransactionPayloadFilterBuilder::default()
-                            .function(
-                                EntryFunctionFilterBuilder::default()
-                                    .function("0xrandom".to_string())
-                                    .build()
-                                    .unwrap(),
-                            )
-                            .build()
-                            .unwrap(),
-                    )
-                    .build()
-                    .unwrap(),
-            ),
-        )]);
-
-        let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter);
-        assert_eq!(num_stripped, 0);
-        assert_eq!(filtered_txns.len(), 1);
-        let txn = filtered_txns.first().unwrap();
-        let user_transaction = match &txn.txn_data {
-            Some(TxnData::User(user_transaction)) => user_transaction,
-            _ => panic!("Expected user transaction"),
-        };
-        assert_ne!(user_transaction.request.as_ref().unwrap().payload, None);
-        assert_ne!(user_transaction.request.as_ref().unwrap().signature, None);
-        assert_ne!(user_transaction.events.len(), 0);
-        assert_ne!(txn.info.as_ref().unwrap().changes.len(), 0);
-    }
-}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-file-store/Cargo.toml
index 57d4eed5e863b..dd4707dabf294 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-file-store/Cargo.toml
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/Cargo.toml
@@ -18,6 +18,7 @@ aptos-indexer-grpc-server-framework = { workspace = true }
 aptos-indexer-grpc-utils = { workspace = true }
 aptos-metrics-core = { workspace = true }
 aptos-moving-average = { workspace = true }
+aptos-protos = { workspace = true }
 async-trait = { workspace = true }
 clap = { workspace = true }
 futures = { workspace = true }
@@ -25,6 +26,7 @@ once_cell = { workspace = true }
 redis = { workspace = true }
 serde = { workspace = true }
 tokio = { workspace = true }
+tonic = { workspace = true }
 tracing = { workspace = true }
 
 [target.'cfg(unix)'.dependencies]
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/data_manager.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/data_manager.rs
new file mode 100644
index 0000000000000..b5ecff9f19f7a
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/data_manager.rs
@@ -0,0 +1,48 @@
+// Copyright (c) Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use aptos_protos::transaction::v1::Transaction;
+
+struct FullNodeClient {} // TODO(placeholder): client for pulling txns from an upstream fullnode.
+
+impl FullNodeClient {
+    pub(crate) fn new() -> Self {
+        Self {}
+    }
+}
+
+struct HistoricalDataFetcher {} // TODO(placeholder): fetches finalized data from the file store.
+
+impl HistoricalDataFetcher {
+    pub(crate) fn new() -> Self {
+        Self {}
+    }
+}
+
+struct LatestDataFetcher {} // TODO(placeholder): fetches recent data not yet in the file store.
+
+impl LatestDataFetcher {
+    pub(crate) fn new() -> Self {
+        Self {}
+    }
+}
+
+pub(crate) struct DataManager {
+    historical_data_fetcher: HistoricalDataFetcher,
+    latest_data_fetcher: LatestDataFetcher,
+}
+
+impl DataManager {
+    pub(crate) fn new() -> Self {
+        let historical_data_fetcher = HistoricalDataFetcher::new();
+        let latest_data_fetcher = LatestDataFetcher::new();
+        Self {
+            historical_data_fetcher,
+            latest_data_fetcher,
+        }
+    }
+
+    pub(crate) async fn get_transactions(&self, _start_version: u64) -> Vec<Transaction> {
+        vec![] // TODO(placeholder): route the request to the appropriate fetcher.
+    }
+}
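DataManager is only a skeleton at this point, but its two fetchers suggest the intended dispatch: recent versions come from the latest-data path and older versions from the file store. A hypothetical routing rule (the latest_window_start boundary is an assumption, not something this patch defines):

    enum Route {
        Historical, // serve from the file store
        Latest,     // serve from the latest-data window
    }

    fn route(start_version: u64, latest_window_start: u64) -> Route {
        if start_version >= latest_window_start {
            Route::Latest
        } else {
            Route::Historical
        }
    }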
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/file_store_uploader.rs
similarity index 66%
rename from ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs
rename to ecosystem/indexer-grpc/indexer-grpc-file-store/src/file_store_uploader.rs
index e9e18d0fb6440..ac6d248e048f5 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/file_store_uploader.rs
@@ -4,14 +4,10 @@
 use crate::metrics::{METADATA_UPLOAD_FAILURE_COUNT, PROCESSED_VERSIONS_COUNT};
 use anyhow::{ensure, Context, Result};
 use aptos_indexer_grpc_utils::{
-    cache_operator::CacheOperator,
-    compression_util::{FileStoreMetadata, StorageFormat, FILE_ENTRY_TRANSACTION_COUNT},
+    compression_util::{FileStoreMetadata, FILE_ENTRY_TRANSACTION_COUNT},
     config::IndexerGrpcFileStoreConfig,
-    counters::{log_grpc_step, IndexerGrpcStep},
     file_store_operator::FileStoreOperator,
-    types::RedisUrl,
 };
-use aptos_moving_average::MovingAverage;
 use std::time::Duration;
 use tracing::debug;
 
@@ -20,44 +16,16 @@ const AHEAD_OF_CACHE_SLEEP_DURATION_IN_MILLIS: u64 = 100;
 const SERVICE_TYPE: &str = "file_worker";
 const MAX_CONCURRENT_BATCHES: usize = 50;
 
-/// Processor tails the data in cache and stores the data in file store.
-pub struct Processor {
-    cache_operator: CacheOperator<redis::aio::ConnectionManager>,
+pub struct FileStoreUploader {
     file_store_operator: Box<dyn FileStoreOperator>,
     chain_id: u64,
 }
 
-impl Processor {
+impl FileStoreUploader {
     pub async fn new(
-        redis_main_instance_address: RedisUrl,
-        file_store_config: IndexerGrpcFileStoreConfig,
+        file_store_config: &IndexerGrpcFileStoreConfig,
         chain_id: u64,
-        enable_cache_compression: bool,
     ) -> Result<Self> {
-        let cache_storage_format = if enable_cache_compression {
-            StorageFormat::Lz4CompressedProto
-        } else {
-            StorageFormat::Base64UncompressedProto
-        };
-
-        // Connection to redis is a hard dependency for file store processor.
-        let conn = redis::Client::open(redis_main_instance_address.0.clone())
-            .with_context(|| {
-                format!(
-                    "Create redis client for {} failed",
-                    redis_main_instance_address.0
-                )
-            })?
-            .get_tokio_connection_manager()
-            .await
-            .with_context(|| {
-                format!(
-                    "Create redis connection to {} failed.",
-                    redis_main_instance_address.0
-                )
-            })?;
-        let mut cache_operator = CacheOperator::new(conn, cache_storage_format);
-
         let mut file_store_operator: Box<dyn FileStoreOperator> = file_store_config.create();
         file_store_operator.verify_storage_bucket_existence().await;
         let file_store_metadata: Option<FileStoreMetadata> =
@@ -82,22 +50,7 @@ impl Processor {
         let metadata = file_store_operator.get_file_store_metadata().await.unwrap();
         ensure!(metadata.chain_id == chain_id, "Chain ID mismatch.");
 
-        let batch_start_version = metadata.version;
-        // Cache config in the cache
-        cache_operator.cache_setup_if_needed().await?;
-        match cache_operator.get_chain_id().await? {
-            Some(id) => {
-                ensure!(id == chain_id, "Chain ID mismatch.");
-            },
-            None => {
-                cache_operator.set_chain_id(chain_id).await?;
-            },
-        }
-        cache_operator
-            .update_file_store_latest_version(batch_start_version)
-            .await?;
         Ok(Self {
-            cache_operator,
             file_store_operator,
             chain_id,
         })
@@ -122,7 +75,6 @@ impl Processor {
 
         let mut batch_start_version = metadata.version;
 
-        let mut tps_calculator = MovingAverage::new(10_000);
         loop {
             let latest_loop_time = std::time::Instant::now();
             let cache_worker_latest = self.cache_operator.get_latest_version().await?.unwrap();
@@ -164,18 +116,6 @@ impl Processor {
                     .await
                     .unwrap();
                 let last_transaction = transactions.last().unwrap().clone();
-                log_grpc_step(
-                    SERVICE_TYPE,
-                    IndexerGrpcStep::FilestoreFetchTxns,
-                    Some(start_version as i64),
-                    Some((start_version + FILE_ENTRY_TRANSACTION_COUNT - 1) as i64),
-                    None,
-                    None,
-                    Some(fetch_start_time.elapsed().as_secs_f64()),
-                    None,
-                    Some(FILE_ENTRY_TRANSACTION_COUNT as i64),
-                    None,
-                );
                 for (i, txn) in transactions.iter().enumerate() {
                     assert_eq!(txn.version, start_version + i as u64);
                 }
@@ -184,18 +124,6 @@ impl Processor {
                     .upload_transaction_batch(chain_id, transactions)
                     .await
                     .unwrap();
-                log_grpc_step(
-                    SERVICE_TYPE,
-                    IndexerGrpcStep::FilestoreUploadTxns,
-                    Some(start_version as i64),
-                    Some((start_version + FILE_ENTRY_TRANSACTION_COUNT - 1) as i64),
-                    None,
-                    None,
-                    Some(upload_start_time.elapsed().as_secs_f64()),
-                    None,
-                    Some(FILE_ENTRY_TRANSACTION_COUNT as i64),
-                    None,
-                );
                 (start, end, last_transaction)
             });
 
@@ -251,13 +179,7 @@ impl Processor {
             );
             let size = last_version - first_version + 1;
             PROCESSED_VERSIONS_COUNT.inc_by(size);
-            tps_calculator.tick_now(size);
 
-            // Update filestore metadata. First do it in cache for performance then update metadata file
-            let start_metadata_upload_time = std::time::Instant::now();
-            self.cache_operator
-                .update_file_store_latest_version(batch_start_version)
-                .await?;
             while self
                 .file_store_operator
                 .update_file_store_metadata_with_timeout(chain_id, batch_start_version)
@@ -271,34 +193,10 @@ impl Processor {
                 std::thread::sleep(std::time::Duration::from_millis(500));
                 METADATA_UPLOAD_FAILURE_COUNT.inc();
             }
 
-            log_grpc_step(
-                SERVICE_TYPE,
-                IndexerGrpcStep::FilestoreUpdateMetadata,
-                Some(first_version as i64),
-                Some(last_version as i64),
-                None,
-                None,
-                Some(start_metadata_upload_time.elapsed().as_secs_f64()),
-                None,
-                Some(size as i64),
-                None,
-            );
             let start_version_timestamp = first_version_encoded.timestamp;
             let end_version_timestamp = last_version_encoded.timestamp;
             let full_loop_duration = latest_loop_time.elapsed().as_secs_f64();
-            log_grpc_step(
-                SERVICE_TYPE,
-                IndexerGrpcStep::FilestoreProcessedBatch,
-                Some(first_version as i64),
-                Some(last_version as i64),
-                start_version_timestamp.as_ref(),
-                end_version_timestamp.as_ref(),
-                Some(full_loop_duration),
-                None,
-                Some(size as i64),
-                None,
-            );
         }
     }
 }
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs
index 336a002ca9f72..6af7ef3335eb1 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs
@@ -1,67 +1,98 @@
 // Copyright © Aptos Foundation
 // SPDX-License-Identifier: Apache-2.0
 
+pub mod data_manager;
+pub mod file_store_uploader;
+pub mod metadata_manager;
 pub mod metrics;
-pub mod processor;
+pub mod service;
 
+use crate::{
+    data_manager::DataManager, metadata_manager::MetadataManager, service::GrpcManagerService,
+};
 use anyhow::Result;
 use aptos_indexer_grpc_server_framework::RunnableConfig;
-use aptos_indexer_grpc_utils::{config::IndexerGrpcFileStoreConfig, types::RedisUrl};
-use processor::Processor;
+use aptos_indexer_grpc_utils::config::IndexerGrpcFileStoreConfig;
+use aptos_protos::indexer::v1::grpc_manager_server::GrpcManagerServer;
+use file_store_uploader::FileStoreUploader;
+use futures::executor::block_on;
 use serde::{Deserialize, Serialize};
+use std::{net::SocketAddr, sync::Arc, time::Duration};
+use tonic::{codec::CompressionEncoding, transport::Server};
+
+const HTTP2_PING_INTERVAL_DURATION: Duration = Duration::from_secs(60);
+const HTTP2_PING_TIMEOUT_DURATION: Duration = Duration::from_secs(10);
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+struct ServiceConfig {
+    listen_address: SocketAddr,
+}
 
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[serde(deny_unknown_fields)]
-pub struct IndexerGrpcFileStoreWorkerConfig {
-    pub file_store_config: IndexerGrpcFileStoreConfig,
-    pub redis_main_instance_address: RedisUrl,
-    pub enable_expensive_logging: Option<bool>,
+pub struct IndexerGrpcManagerConfig {
     pub chain_id: u64,
-    #[serde(default = "default_enable_cache_compression")]
-    pub enable_cache_compression: bool,
+    pub service_config: ServiceConfig,
+    pub file_store_config: IndexerGrpcFileStoreConfig,
 }
 
-const fn default_enable_cache_compression() -> bool {
-    false
+#[async_trait::async_trait]
+impl RunnableConfig for IndexerGrpcManagerConfig {
+    async fn run(&self) -> Result<()> {
+        let grpc_manager = GrpcManager::new(self);
+        grpc_manager
+            .start(&self.service_config.listen_address)
+            .await?;
+        Ok(())
+    }
+
+    fn get_server_name(&self) -> String {
+        "grpc_manager".to_string()
+    }
 }
-impl IndexerGrpcFileStoreWorkerConfig {
-    pub fn new(
-        file_store_config: IndexerGrpcFileStoreConfig,
-        redis_main_instance_address: RedisUrl,
-        enable_expensive_logging: Option<bool>,
-        chain_id: u64,
-        enable_cache_compression: bool,
-    ) -> Self {
+struct GrpcManager {
+    chain_id: u64,
+    filestore_uploader: FileStoreUploader, // TODO: not yet driven; wire up the upload loop.
+    metadata_manager: Arc<MetadataManager>,
+    data_manager: Arc<DataManager>,
+}
+
+impl GrpcManager {
+    pub(crate) fn new(config: &IndexerGrpcManagerConfig) -> Self {
+        let chain_id = config.chain_id;
+        let filestore_uploader =
+            block_on(FileStoreUploader::new(&config.file_store_config, chain_id)).expect(&format!(
+                "Failed to create filestore uploader, config: {:?}.",
+                config.file_store_config
+            ));
+        let data_manager = Arc::new(DataManager::new());
+        let metadata_manager = Arc::new(MetadataManager::new());
         Self {
-            file_store_config,
-            redis_main_instance_address,
-            enable_expensive_logging,
             chain_id,
-            enable_cache_compression,
+            filestore_uploader,
+            metadata_manager,
+            data_manager,
         }
     }
-}
 
-#[async_trait::async_trait]
-impl RunnableConfig for IndexerGrpcFileStoreWorkerConfig {
-    async fn run(&self) -> Result<()> {
-        let mut processor = Processor::new(
-            self.redis_main_instance_address.clone(),
-            self.file_store_config.clone(),
+    pub(crate) async fn start(&self, listen_address: &SocketAddr) -> Result<()> {
+        self.metadata_manager.start()?; // TODO: start() loops forever; run it on a background task so the server below can come up.
+        let service = GrpcManagerServer::new(GrpcManagerService::new(
             self.chain_id,
-            self.enable_cache_compression,
-        )
-        .await
-        .expect("Failed to create file store processor");
-        processor
-            .run()
+            self.metadata_manager.clone(),
+            self.data_manager.clone(),
+        ))
+        .send_compressed(CompressionEncoding::Zstd)
+        .accept_compressed(CompressionEncoding::Zstd);
+        let server = Server::builder()
+            .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION))
+            .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION))
+            .add_service(service);
+        server
+            .serve(*listen_address)
             .await
-            .expect("File store processor exited unexpectedly");
+            .map_err(|e| anyhow::anyhow!(e))?;
         Ok(())
     }
-
-    fn get_server_name(&self) -> String {
-        "idxfilestore".to_string()
-    }
 }
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs
index 3e2d0671339bc..896a0990f6e5c 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs
@@ -13,7 +13,7 @@ static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
 #[tokio::main]
 async fn main() -> Result<()> {
     let args = ServerArgs::parse();
-    args.run::<IndexerGrpcFileStoreWorkerConfig>()
+    args.run::<IndexerGrpcManagerConfig>()
         .await
         .expect("Failed to run server");
     Ok(())
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/metadata_manager.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/metadata_manager.rs
new file mode 100644
index 0000000000000..756507ef608e0
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/metadata_manager.rs
@@ -0,0 +1,59 @@
+// Copyright (c) Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::Result;
+
+struct FullNodeInfo {
+    address: String,
+    latest_version: u64,
+    oldest_version: u64,
+}
+
+struct LiveDataServiceInfo {
+    address: String,
+    known_latest_version: u64,
+    oldest_version: u64,
+}
+
+struct HistoricalDataServiceInfo {
+    address: String,
+    known_latest_version: u64,
+    known_filestore_latest_version: u64,
+}
+
+struct GrpcManagerInfo {
+    address: String,
+    is_master: bool,
+}
+
+pub(crate) struct MetadataManager {
+    grpc_managers: Vec<GrpcManagerInfo>,
+    fullnodes: Vec<FullNodeInfo>,
+    live_data_services: Vec<LiveDataServiceInfo>,
+    historical_data_services: Vec<HistoricalDataServiceInfo>,
+}
+
+impl MetadataManager {
+    pub(crate) fn new() -> Self {
+        Self {
+            grpc_managers: vec![],
+            fullnodes: vec![],
+            live_data_services: vec![],
+            historical_data_services: vec![],
+        }
+    }
+
+    pub(crate) fn start(&self) -> Result<()> {
+        // TODO: this heartbeat loop never yields or returns; add a sleep between
+        // rounds and run it on a background task.
+        loop {
+            for _grpc_manager in &self.grpc_managers {} // TODO: exchange heartbeats with peer grpc managers.
+
+            for _fullnode in &self.fullnodes {} // TODO: poll fullnodes for their known versions.
+
+            for _live_data_service in &self.live_data_services {} // TODO: ping live data services.
+
+            for _historical_data_service in &self.historical_data_services {} // TODO: ping historical data services.
+        }
+    }
+}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs
index 7aad300d5730e..d2e0e85c3ae95 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs
@@ -383,14 +383,21 @@ impl<T: redis::aio::ConnectionLike + Send> CacheOperator<T> {
         let start_time = std::time::Instant::now();
         let mut transactions = vec![];
         for encoded_transaction in encoded_transactions {
+            // An empty entry means the data is not yet in the cache; stop and
+            // return the partial batch collected so far.
+            if encoded_transaction.is_empty() {
+                break;
+            }
             let cache_entry: CacheEntry = CacheEntry::new(encoded_transaction, self.storage_format);
             let transaction = cache_entry.into_transaction();
             transactions.push(transaction);
         }
+        // Partial batches are allowed now, so the strict count check is disabled.
+        /*
         ensure!(
             transactions.len() == transaction_count as usize,
             "Failed to get all transactions from cache."
-        );
+        );*/
         let decoding_duration = start_time.elapsed().as_secs_f64();
         Ok((transactions, io_duration, decoding_duration))
     }
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs
index 07f528e6df124..fea4f7e64244f 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs
@@ -65,6 +65,7 @@ impl FileStoreMetadata {
     }
 }
 
+#[derive(Debug)]
 pub enum CacheEntry {
     Lz4CompressionProto(Vec<u8>),
     // Only used for legacy cache entry.
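With the strict count check in cache_operator disabled, a batch read may now return only a prefix of what was requested, and callers are expected to advance by however much arrived and poll again. A minimal sketch of that contract (fetch_batch and its numbers are stand-ins, not the real API):

    async fn fetch_batch(_start_version: u64, want: u64) -> u64 {
        want.min(3) // stub: pretend only 3 entries were present in the cache
    }

    async fn read_available(start_version: u64, want: u64) -> (u64, u64) {
        let got = fetch_batch(start_version, want).await; // may be fewer than `want`
        (start_version + got, want - got) // resume version and remaining count
    }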
@@ -147,7 +148,9 @@ impl CacheEntry {
                 decompressor
                     .read_to_end(&mut decompressed)
                     .expect("Lz4 decompression failed.");
-                Transaction::decode(decompressed.as_slice()).expect("proto deserialization failed.")
+                let res = Transaction::decode(decompressed.as_slice())
+                    .expect("proto deserialization failed.");
+                res
             },
             CacheEntry::Base64UncompressedProto(bytes) => {
                 let bytes: Vec<u8> = base64::decode(bytes).expect("base64 decoding failed.");
diff --git a/protos/proto/aptos/indexer/v1/grpc.proto b/protos/proto/aptos/indexer/v1/grpc.proto
new file mode 100644
index 0000000000000..2505163cee164
--- /dev/null
+++ b/protos/proto/aptos/indexer/v1/grpc.proto
@@ -0,0 +1,82 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+syntax = "proto3";
+
+package aptos.indexer.v1;
+
+import "aptos/indexer/v1/raw_data.proto";
+import "aptos/transaction/v1/transaction.proto";
+import "aptos/util/timestamp/timestamp.proto";
+
+message ActiveStream {
+  optional string id = 1;
+  optional uint64 current_version = 2;
+  optional uint64 end_version = 3;
+}
+
+message StreamInfo {
+  repeated ActiveStream active_stream = 1;
+}
+
+message LiveDataServiceInfo {
+  optional aptos.util.timestamp.Timestamp timestamp = 1;
+  optional uint64 known_latest_version = 2;
+  optional StreamInfo stream_info = 3;
+}
+
+message HistoricalDataServiceInfo {
+  optional aptos.util.timestamp.Timestamp timestamp = 1;
+  optional uint64 known_latest_version = 2;
+}
+
+message FullNodeInfo {
+}
+
+message ServiceInfo {
+  optional string address = 1;
+  oneof service_type {
+    LiveDataServiceInfo live_data_service_info = 2;
+    HistoricalDataServiceInfo historical_data_service_info = 3;
+    FullNodeInfo full_node_info = 4;
+  }
+}
+
+message HeartbeatRequest {
+  optional ServiceInfo service_info = 1;
+}
+
+message HeartbeatResponse {
+  optional uint64 known_latest_version = 1;
+}
+
+message PingLiveDataServiceRequest {
+  optional uint64 known_latest_version = 1;
+}
+
+message PingLiveDataServiceResponse {
+  optional LiveDataServiceInfo service_info = 1;
+}
+
+message PingHistoricalDataServiceRequest {
+  optional uint64 known_latest_version = 1;
+}
+
+message PingHistoricalDataServiceResponse {
+  optional HistoricalDataServiceInfo service_info = 1;
+}
+
+service GrpcManager {
+  rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse);
+  rpc GetTransactions(GetTransactionsRequest) returns (TransactionsResponse);
+}
+
+service LiveDataService {
+  rpc Ping(PingLiveDataServiceRequest) returns (PingLiveDataServiceResponse);
+  rpc GetTransactions(GetTransactionsRequest) returns (stream TransactionsResponse);
+}
+
+service HistoricalDataService {
+  rpc Ping(PingHistoricalDataServiceRequest) returns (PingHistoricalDataServiceResponse);
+  rpc GetTransactions(GetTransactionsRequest) returns (stream TransactionsResponse);
+}
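For a sense of how the new surface is consumed, here is a client exercising GrpcManager's unary GetTransactions through the tonic bindings generated from this proto (the endpoint address and version numbers are made up):

    use aptos_protos::indexer::v1::{
        grpc_manager_client::GrpcManagerClient, GetTransactionsRequest,
    };

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        let mut client = GrpcManagerClient::connect("http://127.0.0.1:50051").await?;
        let request = GetTransactionsRequest {
            starting_version: Some(1_000_000),
            transactions_count: Some(100),
            batch_size: None,
        };
        let response = client.get_transactions(request).await?.into_inner();
        println!(
            "chain {:?}: got {} transactions",
            response.chain_id,
            response.transactions.len()
        );
        Ok(())
    }

Note that LiveDataService and HistoricalDataService expose the same request type but return a server stream, so their clients iterate over a streaming response instead.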
diff --git a/protos/rust/src/pb/aptos.indexer.v1.rs b/protos/rust/src/pb/aptos.indexer.v1.rs
index b10551ffc515a..b7c34ab242809 100644
--- a/protos/rust/src/pb/aptos.indexer.v1.rs
+++ b/protos/rust/src/pb/aptos.indexer.v1.rs
@@ -2,8 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 
 // @generated
+// This file is @generated by prost-build.
 /// This is for storage only.
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct TransactionsInStorage {
     /// Required; transactions data.
@@ -13,8 +13,7 @@ pub struct TransactionsInStorage {
     #[prost(uint64, optional, tag="2")]
     pub starting_version: ::core::option::Option<u64>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct GetTransactionsRequest {
     /// Required; start version of current stream.
     #[prost(uint64, optional, tag="1")]
@@ -29,7 +28,6 @@ pub struct GetTransactionsRequest {
     pub batch_size: ::core::option::Option<u64>,
 }
 /// TransactionsResponse is a batch of transactions.
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct TransactionsResponse {
     /// Required; transactions data.
@@ -39,6 +37,88 @@ pub struct TransactionsResponse {
     #[prost(uint64, optional, tag="2")]
     pub chain_id: ::core::option::Option<u64>,
 }
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ActiveStream {
+    #[prost(string, optional, tag="1")]
+    pub id: ::core::option::Option<::prost::alloc::string::String>,
+    #[prost(uint64, optional, tag="2")]
+    pub current_version: ::core::option::Option<u64>,
+    #[prost(uint64, optional, tag="3")]
+    pub end_version: ::core::option::Option<u64>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct StreamInfo {
+    #[prost(message, repeated, tag="1")]
+    pub active_stream: ::prost::alloc::vec::Vec<ActiveStream>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LiveDataServiceInfo {
+    #[prost(message, optional, tag="1")]
+    pub timestamp: ::core::option::Option<super::super::util::timestamp::Timestamp>,
+    #[prost(uint64, optional, tag="2")]
+    pub known_latest_version: ::core::option::Option<u64>,
+    #[prost(message, optional, tag="3")]
+    pub stream_info: ::core::option::Option<StreamInfo>,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct HistoricalDataServiceInfo {
+    #[prost(message, optional, tag="1")]
+    pub timestamp: ::core::option::Option<super::super::util::timestamp::Timestamp>,
+    #[prost(uint64, optional, tag="2")]
+    pub known_latest_version: ::core::option::Option<u64>,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct FullNodeInfo {
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ServiceInfo {
+    #[prost(string, optional, tag="1")]
+    pub address: ::core::option::Option<::prost::alloc::string::String>,
+    #[prost(oneof="service_info::ServiceType", tags="2, 3, 4")]
+    pub service_type: ::core::option::Option<service_info::ServiceType>,
+}
+/// Nested message and enum types in `ServiceInfo`.
+pub mod service_info { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum ServiceType { + #[prost(message, tag="2")] + LiveDataServiceInfo(super::LiveDataServiceInfo), + #[prost(message, tag="3")] + HistoricalDataServiceInfo(super::HistoricalDataServiceInfo), + #[prost(message, tag="4")] + FullNodeInfo(super::FullNodeInfo), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HeartbeatRequest { + #[prost(message, optional, tag="1")] + pub service_info: ::core::option::Option, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct HeartbeatResponse { + #[prost(uint64, optional, tag="1")] + pub known_latest_version: ::core::option::Option, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PingLiveDataServiceRequest { + #[prost(uint64, optional, tag="1")] + pub known_latest_version: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PingLiveDataServiceResponse { + #[prost(message, optional, tag="1")] + pub service_info: ::core::option::Option, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PingHistoricalDataServiceRequest { + #[prost(uint64, optional, tag="1")] + pub known_latest_version: ::core::option::Option, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PingHistoricalDataServiceResponse { + #[prost(message, optional, tag="1")] + pub service_info: ::core::option::Option, +} /// Encoded file descriptor set for the `aptos.indexer.v1` package pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x0a, 0xce, 0x12, 0x0a, 0x1f, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, @@ -190,7 +270,313 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x29, 0x16, 0x2c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x29, 0x37, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x29, 0x3e, 0x52, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x33, 0x0a, 0xa3, 0x26, 0x0a, 0x1b, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x10, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x61, 0x77, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x61, 0x70, + 0x74, 0x6f, 0x73, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x02, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x48, 0x01, 0x52, 0x0e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x5f, 0x76, 
0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, 0x0a, 0x65, + 0x6e, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x05, 0x0a, 0x03, + 0x5f, 0x69, 0x64, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x65, 0x6e, 0x64, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x51, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x43, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, + 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x0c, 0x61, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x22, 0x8b, 0x02, 0x0a, 0x13, 0x4c, + 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x75, 0x74, + 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, + 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, + 0x0b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, + 0x48, 0x02, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x88, 0x01, + 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xbd, 0x01, 0x0a, 0x19, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, 0x6f, + 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, + 0x01, 0x42, 0x0c, 
0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x0e, 0x0a, 0x0c, 0x46, 0x75, 0x6c, 0x6c, + 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xde, 0x02, 0x0a, 0x0b, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x5c, 0x0a, 0x16, 0x6c, 0x69, 0x76, 0x65, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x76, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, + 0x52, 0x13, 0x6c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x6e, 0x0a, 0x1c, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, + 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, + 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x19, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x46, 0x0a, 0x0e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, + 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x0e, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0a, 0x0a, + 0x08, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x6a, 0x0a, 0x10, 0x48, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x63, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, + 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, + 0x01, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, 0x1a, 0x50, 0x69, + 0x6e, 0x67, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, + 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, + 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7d, 0x0a, 0x1b, 0x50, 0x69, 0x6e, 0x67, + 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x72, 0x0a, 0x20, 0x50, 0x69, 0x6e, 0x67, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x14, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x12, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, + 0x01, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x21, + 0x50, 0x69, 0x6e, 0x67, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x53, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, + 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x32, 0xc8, 0x01, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x12, 0x22, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 
0x72, 0x74, 0x62, 0x65, 0x61, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x72, + 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, + 0x0f, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x61, 0x70, 0x74, + 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x32, 0xdd, 0x01, 0x0a, 0x0f, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x2c, + 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x61, + 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0f, 0x47, + 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, + 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x32, 0xef, 0x01, 0x0a, 0x15, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, + 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6f, 0x0a, 0x04, + 0x50, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, + 0x0f, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x61, 0x70, 
0x74, + 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x30, 0x01, 0x42, 0x83, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x74, + 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x09, 0x47, + 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xa2, 0x02, 0x03, 0x41, 0x49, 0x58, + 0xaa, 0x02, 0x10, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x10, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1c, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x3a, 0x3a, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x4a, 0xe2, 0x10, 0x0a, 0x06, 0x12, + 0x04, 0x03, 0x00, 0x51, 0x01, 0x0a, 0x4e, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x03, 0x00, 0x12, 0x32, + 0x44, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0xc2, 0xa9, 0x20, 0x41, + 0x70, 0x74, 0x6f, 0x73, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, + 0x20, 0x53, 0x50, 0x44, 0x58, 0x2d, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x2d, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, + 0x2d, 0x32, 0x2e, 0x30, 0x0a, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x05, 0x00, 0x19, 0x0a, + 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x07, 0x00, 0x29, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, + 0x12, 0x03, 0x08, 0x00, 0x30, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, 0x12, 0x03, 0x09, 0x00, 0x2e, + 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0b, 0x00, 0x0f, 0x01, 0x0a, 0x0a, 0x0a, 0x03, + 0x04, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x08, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, + 0x12, 0x03, 0x0c, 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x04, 0x12, 0x03, + 0x0c, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x0c, 0x0b, + 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0c, 0x12, 0x14, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0c, 0x17, 0x18, 0x0a, 0x0b, 0x0a, + 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0d, 0x02, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x01, 0x04, 0x12, 0x03, 0x0d, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, + 0x05, 0x12, 0x03, 0x0d, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, + 0x03, 0x0d, 0x12, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0d, + 0x24, 0x25, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0e, 0x02, 0x22, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x04, 0x12, 0x03, 0x0e, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0e, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x12, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x02, 0x03, 0x12, 0x03, 0x0e, 0x20, 0x21, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x11, + 0x00, 0x13, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x11, 0x08, 0x12, 0x0a, + 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x12, 0x02, 0x2a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 
0x00, 0x04, 0x12, 0x03, 0x12, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x00, 0x06, 0x12, 0x03, 0x12, 0x0b, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x12, 0x18, 0x25, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, + 0x03, 0x12, 0x28, 0x29, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x15, 0x00, 0x19, 0x01, + 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x15, 0x08, 0x1b, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x16, 0x04, 0x3a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x00, 0x04, 0x12, 0x03, 0x16, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x06, + 0x12, 0x03, 0x16, 0x0d, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, + 0x16, 0x2c, 0x35, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x16, 0x38, + 0x39, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x17, 0x04, 0x2d, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x04, 0x12, 0x03, 0x17, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x01, 0x05, 0x12, 0x03, 0x17, 0x0d, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x01, 0x01, 0x12, 0x03, 0x17, 0x14, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, + 0x03, 0x12, 0x03, 0x17, 0x2b, 0x2c, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, + 0x18, 0x04, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x04, 0x12, 0x03, 0x18, 0x04, + 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x06, 0x12, 0x03, 0x18, 0x0d, 0x17, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, 0x12, 0x03, 0x18, 0x18, 0x23, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, 0x18, 0x26, 0x27, 0x0a, 0x0a, 0x0a, 0x02, 0x04, + 0x03, 0x12, 0x04, 0x1b, 0x00, 0x1e, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, 0x01, 0x12, 0x03, + 0x1b, 0x08, 0x21, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x1c, 0x04, 0x3a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x04, 0x12, 0x03, 0x1c, 0x04, 0x0c, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, 0x1c, 0x0d, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x1c, 0x2c, 0x35, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x1c, 0x38, 0x39, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x01, + 0x12, 0x03, 0x1d, 0x04, 0x2d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x04, 0x12, 0x03, + 0x1d, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x05, 0x12, 0x03, 0x1d, 0x0d, + 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, 0x03, 0x1d, 0x14, 0x28, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, 0x1d, 0x2b, 0x2c, 0x0a, 0x0a, 0x0a, + 0x02, 0x04, 0x04, 0x12, 0x04, 0x20, 0x00, 0x21, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, + 0x12, 0x03, 0x20, 0x08, 0x14, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x05, 0x12, 0x04, 0x23, 0x00, 0x2a, + 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, 0x03, 0x23, 0x08, 0x13, 0x0a, 0x0b, 0x0a, + 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, 0x24, 0x02, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, + 0x02, 0x00, 0x04, 0x12, 0x03, 0x24, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, + 0x05, 0x12, 0x03, 0x24, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, + 0x03, 0x24, 0x12, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x03, 0x12, 0x03, 0x24, + 0x1c, 0x1d, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x05, 0x08, 0x00, 0x12, 0x04, 0x25, 0x02, 0x29, 0x03, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x08, 
0x00, 0x01, 0x12, 0x03, 0x25, 0x08, 0x14, 0x0a, 0x0b, + 0x0a, 0x04, 0x04, 0x05, 0x02, 0x01, 0x12, 0x03, 0x26, 0x06, 0x35, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x05, 0x02, 0x01, 0x06, 0x12, 0x03, 0x26, 0x06, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, + 0x01, 0x01, 0x12, 0x03, 0x26, 0x1a, 0x30, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x03, + 0x12, 0x03, 0x26, 0x33, 0x34, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x02, 0x12, 0x03, 0x27, + 0x06, 0x41, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x06, 0x12, 0x03, 0x27, 0x06, 0x1f, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x01, 0x12, 0x03, 0x27, 0x20, 0x3c, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x03, 0x12, 0x03, 0x27, 0x3f, 0x40, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x05, 0x02, 0x03, 0x12, 0x03, 0x28, 0x06, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, + 0x03, 0x06, 0x12, 0x03, 0x28, 0x06, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x03, 0x01, + 0x12, 0x03, 0x28, 0x13, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x03, 0x03, 0x12, 0x03, + 0x28, 0x24, 0x25, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x06, 0x12, 0x04, 0x2c, 0x00, 0x2e, 0x01, 0x0a, + 0x0a, 0x0a, 0x03, 0x04, 0x06, 0x01, 0x12, 0x03, 0x2c, 0x08, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, + 0x06, 0x02, 0x00, 0x12, 0x03, 0x2d, 0x02, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, + 0x04, 0x12, 0x03, 0x2d, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x06, 0x12, + 0x03, 0x2d, 0x0b, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x01, 0x12, 0x03, 0x2d, + 0x17, 0x23, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, 0x12, 0x03, 0x2d, 0x26, 0x27, + 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x07, 0x12, 0x04, 0x30, 0x00, 0x32, 0x01, 0x0a, 0x0a, 0x0a, 0x03, + 0x04, 0x07, 0x01, 0x12, 0x03, 0x30, 0x08, 0x19, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x00, + 0x12, 0x03, 0x31, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x04, 0x12, 0x03, + 0x31, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x05, 0x12, 0x03, 0x31, 0x0b, + 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x01, 0x12, 0x03, 0x31, 0x12, 0x26, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x03, 0x12, 0x03, 0x31, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, + 0x02, 0x04, 0x08, 0x12, 0x04, 0x34, 0x00, 0x36, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x08, 0x01, + 0x12, 0x03, 0x34, 0x08, 0x22, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x00, 0x12, 0x03, 0x35, + 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x04, 0x12, 0x03, 0x35, 0x02, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x05, 0x12, 0x03, 0x35, 0x0b, 0x11, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x01, 0x12, 0x03, 0x35, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x08, 0x02, 0x00, 0x03, 0x12, 0x03, 0x35, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x09, + 0x12, 0x04, 0x38, 0x00, 0x3a, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x09, 0x01, 0x12, 0x03, 0x38, + 0x08, 0x23, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x09, 0x02, 0x00, 0x12, 0x03, 0x39, 0x02, 0x30, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x04, 0x12, 0x03, 0x39, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x09, 0x02, 0x00, 0x06, 0x12, 0x03, 0x39, 0x0b, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x09, 0x02, 0x00, 0x01, 0x12, 0x03, 0x39, 0x1f, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x09, 0x02, + 0x00, 0x03, 0x12, 0x03, 0x39, 0x2e, 0x2f, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x0a, 0x12, 0x04, 0x3c, + 0x00, 0x3e, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x0a, 0x01, 0x12, 0x03, 0x3c, 0x08, 0x28, 0x0a, + 0x0b, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x00, 0x12, 0x03, 0x3d, 0x02, 
0x2b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x0a, 0x02, 0x00, 0x04, 0x12, 0x03, 0x3d, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, + 0x02, 0x00, 0x05, 0x12, 0x03, 0x3d, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x3d, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x03, 0x12, + 0x03, 0x3d, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x0b, 0x12, 0x04, 0x40, 0x00, 0x42, 0x01, + 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x0b, 0x01, 0x12, 0x03, 0x40, 0x08, 0x29, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x0b, 0x02, 0x00, 0x12, 0x03, 0x41, 0x02, 0x36, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, + 0x00, 0x04, 0x12, 0x03, 0x41, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x06, + 0x12, 0x03, 0x41, 0x0b, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x01, 0x12, 0x03, + 0x41, 0x25, 0x31, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x03, 0x12, 0x03, 0x41, 0x34, + 0x35, 0x0a, 0x0a, 0x0a, 0x02, 0x06, 0x00, 0x12, 0x04, 0x44, 0x00, 0x47, 0x01, 0x0a, 0x0a, 0x0a, + 0x03, 0x06, 0x00, 0x01, 0x12, 0x03, 0x44, 0x08, 0x13, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x00, 0x02, + 0x00, 0x12, 0x03, 0x45, 0x04, 0x40, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x01, 0x12, + 0x03, 0x45, 0x08, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x45, + 0x12, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x45, 0x2d, 0x3e, + 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x01, 0x12, 0x03, 0x46, 0x04, 0x4f, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x46, 0x08, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x06, + 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x46, 0x18, 0x2e, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x46, 0x39, 0x4d, 0x0a, 0x0a, 0x0a, 0x02, 0x06, 0x01, 0x12, 0x04, 0x49, + 0x00, 0x4c, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x06, 0x01, 0x01, 0x12, 0x03, 0x49, 0x08, 0x17, 0x0a, + 0x0b, 0x0a, 0x04, 0x06, 0x01, 0x02, 0x00, 0x12, 0x03, 0x4a, 0x02, 0x4d, 0x0a, 0x0c, 0x0a, 0x05, + 0x06, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x4a, 0x06, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x01, + 0x02, 0x00, 0x02, 0x12, 0x03, 0x4a, 0x0b, 0x25, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x00, + 0x03, 0x12, 0x03, 0x4a, 0x30, 0x4b, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x01, 0x02, 0x01, 0x12, 0x03, + 0x4b, 0x02, 0x54, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x4b, 0x06, + 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x01, 0x02, 0x12, 0x03, 0x4b, 0x16, 0x2c, 0x0a, + 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x4b, 0x37, 0x3d, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x4b, 0x3e, 0x52, 0x0a, 0x0a, 0x0a, 0x02, 0x06, + 0x02, 0x12, 0x04, 0x4e, 0x00, 0x51, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x06, 0x02, 0x01, 0x12, 0x03, + 0x4e, 0x08, 0x1d, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x02, 0x02, 0x00, 0x12, 0x03, 0x4f, 0x02, 0x59, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x4f, 0x06, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x06, 0x02, 0x02, 0x00, 0x02, 0x12, 0x03, 0x4f, 0x0b, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, + 0x06, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x4f, 0x36, 0x57, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x02, + 0x02, 0x01, 0x12, 0x03, 0x50, 0x02, 0x54, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x02, 0x02, 0x01, 0x01, + 0x12, 0x03, 0x50, 0x06, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x02, 0x02, 0x01, 0x02, 0x12, 0x03, + 0x50, 0x16, 0x2c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x02, 0x02, 0x01, 0x06, 0x12, 0x03, 0x50, 0x37, + 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, 0x50, 0x3e, 0x52, 
0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ]; include!("aptos.indexer.v1.serde.rs"); include!("aptos.indexer.v1.tonic.rs"); diff --git a/protos/rust/src/pb/aptos.indexer.v1.serde.rs b/protos/rust/src/pb/aptos.indexer.v1.serde.rs index f77e4d406fad9..5364ffc2ff2b3 100644 --- a/protos/rust/src/pb/aptos.indexer.v1.serde.rs +++ b/protos/rust/src/pb/aptos.indexer.v1.serde.rs @@ -2,6 +2,208 @@ // SPDX-License-Identifier: Apache-2.0 // @generated +impl serde::Serialize for ActiveStream { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.id.is_some() { + len += 1; + } + if self.current_version.is_some() { + len += 1; + } + if self.end_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.ActiveStream", len)?; + if let Some(v) = self.id.as_ref() { + struct_ser.serialize_field("id", v)?; + } + if let Some(v) = self.current_version.as_ref() { + struct_ser.serialize_field("currentVersion", ToString::to_string(&v).as_str())?; + } + if let Some(v) = self.end_version.as_ref() { + struct_ser.serialize_field("endVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for ActiveStream { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "id", + "current_version", + "currentVersion", + "end_version", + "endVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Id, + CurrentVersion, + EndVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "id" => Ok(GeneratedField::Id), + "currentVersion" | "current_version" => Ok(GeneratedField::CurrentVersion), + "endVersion" | "end_version" => Ok(GeneratedField::EndVersion), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ActiveStream; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.ActiveStream") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut id__ = None; + let mut current_version__ = None; + let mut end_version__ = None; + while let Some(k) = map.next_key()? 
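// A hedged sketch, not taken from this patch: the byte array that closes at
// the start of this hunk is the encoded FileDescriptorSet for
// aptos/indexer/v1/grpc.proto. One plausible use is serving gRPC reflection
// so tools like grpcurl can discover the new GrpcManager, LiveDataService and
// HistoricalDataService RPCs. The constant name FILE_DESCRIPTOR_SET and the
// aptos_protos module path are assumptions; the actual declaration sits above
// this hunk.
async fn serve_with_reflection(
    addr: std::net::SocketAddr,
) -> Result<(), Box<dyn std::error::Error>> {
    let reflection = tonic_reflection::server::Builder::configure()
        .register_encoded_file_descriptor_set(aptos_protos::indexer::v1::FILE_DESCRIPTOR_SET)
        .build()?;
    tonic::transport::Server::builder()
        .add_service(reflection)
        .serve(addr)
        .await?;
    Ok(())
}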
{ + match k { + GeneratedField::Id => { + if id__.is_some() { + return Err(serde::de::Error::duplicate_field("id")); + } + id__ = map.next_value()?; + } + GeneratedField::CurrentVersion => { + if current_version__.is_some() { + return Err(serde::de::Error::duplicate_field("currentVersion")); + } + current_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + GeneratedField::EndVersion => { + if end_version__.is_some() { + return Err(serde::de::Error::duplicate_field("endVersion")); + } + end_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(ActiveStream { + id: id__, + current_version: current_version__, + end_version: end_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.ActiveStream", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for FullNodeInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let len = 0; + let struct_ser = serializer.serialize_struct("aptos.indexer.v1.FullNodeInfo", len)?; + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for FullNodeInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + Err(serde::de::Error::unknown_field(value, FIELDS)) + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = FullNodeInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.FullNodeInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + while map.next_key::()?.is_some() { + let _ = map.next_value::()?; + } + Ok(FullNodeInfo { + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.FullNodeInfo", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for GetTransactionsRequest { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result @@ -136,6 +338,1038 @@ impl<'de> serde::Deserialize<'de> for GetTransactionsRequest { deserializer.deserialize_struct("aptos.indexer.v1.GetTransactionsRequest", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for HeartbeatRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.service_info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.HeartbeatRequest", len)?; + if let Some(v) = self.service_info.as_ref() { + struct_ser.serialize_field("serviceInfo", v)?; + } + 
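// A minimal sketch of what the pbjson impls above produce, assuming the
// message layout implied by those impls (ActiveStream has exactly
// `id: Option<String>`, `current_version: Option<u64>`,
// `end_version: Option<u64>`) and the usual aptos_protos module path:
// optional u64 fields serialize as JSON *strings* under camelCase keys, and
// deserialization accepts either camelCase or snake_case names.
use aptos_protos::indexer::v1::ActiveStream;

fn active_stream_json_roundtrip() -> serde_json::Result<()> {
    let stream = ActiveStream {
        id: Some("stream-1".to_string()),
        current_version: Some(100),
        end_version: None,
    };
    // Serializes to {"id":"stream-1","currentVersion":"100"}.
    let json = serde_json::to_string(&stream)?;
    let back: ActiveStream = serde_json::from_str(&json)?;
    assert_eq!(stream, back);
    Ok(())
}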
struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for HeartbeatRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "service_info", + "serviceInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + ServiceInfo, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "serviceInfo" | "service_info" => Ok(GeneratedField::ServiceInfo), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = HeartbeatRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.HeartbeatRequest") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut service_info__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::ServiceInfo => { + if service_info__.is_some() { + return Err(serde::de::Error::duplicate_field("serviceInfo")); + } + service_info__ = map.next_value()?; + } + } + } + Ok(HeartbeatRequest { + service_info: service_info__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.HeartbeatRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for HeartbeatResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.known_latest_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.HeartbeatResponse", len)?; + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for HeartbeatResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "known_latest_version", + "knownLatestVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + KnownLatestVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + _ => Err(serde::de::Error::unknown_field(value, 
FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = HeartbeatResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.HeartbeatResponse") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut known_latest_version__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(HeartbeatResponse { + known_latest_version: known_latest_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.HeartbeatResponse", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for HistoricalDataServiceInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.timestamp.is_some() { + len += 1; + } + if self.known_latest_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.HistoricalDataServiceInfo", len)?; + if let Some(v) = self.timestamp.as_ref() { + struct_ser.serialize_field("timestamp", v)?; + } + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for HistoricalDataServiceInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "timestamp", + "known_latest_version", + "knownLatestVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Timestamp, + KnownLatestVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "timestamp" => Ok(GeneratedField::Timestamp), + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = HistoricalDataServiceInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.HistoricalDataServiceInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut timestamp__ = None; + let mut known_latest_version__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::Timestamp => { + if timestamp__.is_some() { + return Err(serde::de::Error::duplicate_field("timestamp")); + } + timestamp__ = map.next_value()?; + } + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(HistoricalDataServiceInfo { + timestamp: timestamp__, + known_latest_version: known_latest_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.HistoricalDataServiceInfo", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for LiveDataServiceInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.timestamp.is_some() { + len += 1; + } + if self.known_latest_version.is_some() { + len += 1; + } + if self.stream_info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.LiveDataServiceInfo", len)?; + if let Some(v) = self.timestamp.as_ref() { + struct_ser.serialize_field("timestamp", v)?; + } + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + if let Some(v) = self.stream_info.as_ref() { + struct_ser.serialize_field("streamInfo", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for LiveDataServiceInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "timestamp", + "known_latest_version", + "knownLatestVersion", + "stream_info", + "streamInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Timestamp, + KnownLatestVersion, + StreamInfo, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "timestamp" => Ok(GeneratedField::Timestamp), + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + "streamInfo" | "stream_info" => Ok(GeneratedField::StreamInfo), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = LiveDataServiceInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.LiveDataServiceInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut timestamp__ = None; + let mut known_latest_version__ = None; + let mut stream_info__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::Timestamp => { + if timestamp__.is_some() { + return Err(serde::de::Error::duplicate_field("timestamp")); + } + timestamp__ = map.next_value()?; + } + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + GeneratedField::StreamInfo => { + if stream_info__.is_some() { + return Err(serde::de::Error::duplicate_field("streamInfo")); + } + stream_info__ = map.next_value()?; + } + } + } + Ok(LiveDataServiceInfo { + timestamp: timestamp__, + known_latest_version: known_latest_version__, + stream_info: stream_info__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.LiveDataServiceInfo", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for PingHistoricalDataServiceRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.known_latest_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.PingHistoricalDataServiceRequest", len)?; + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PingHistoricalDataServiceRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "known_latest_version", + "knownLatestVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + KnownLatestVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PingHistoricalDataServiceRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.PingHistoricalDataServiceRequest") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut known_latest_version__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(PingHistoricalDataServiceRequest { + known_latest_version: known_latest_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.PingHistoricalDataServiceRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for PingHistoricalDataServiceResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.service_info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.PingHistoricalDataServiceResponse", len)?; + if let Some(v) = self.service_info.as_ref() { + struct_ser.serialize_field("serviceInfo", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PingHistoricalDataServiceResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "service_info", + "serviceInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + ServiceInfo, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "serviceInfo" | "service_info" => Ok(GeneratedField::ServiceInfo), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PingHistoricalDataServiceResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.PingHistoricalDataServiceResponse") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut service_info__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::ServiceInfo => { + if service_info__.is_some() { + return Err(serde::de::Error::duplicate_field("serviceInfo")); + } + service_info__ = map.next_value()?; + } + } + } + Ok(PingHistoricalDataServiceResponse { + service_info: service_info__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.PingHistoricalDataServiceResponse", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for PingLiveDataServiceRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.known_latest_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.PingLiveDataServiceRequest", len)?; + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PingLiveDataServiceRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "known_latest_version", + "knownLatestVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + KnownLatestVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PingLiveDataServiceRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.PingLiveDataServiceRequest") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut known_latest_version__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(PingLiveDataServiceRequest { + known_latest_version: known_latest_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.PingLiveDataServiceRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for PingLiveDataServiceResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.service_info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.PingLiveDataServiceResponse", len)?; + if let Some(v) = self.service_info.as_ref() { + struct_ser.serialize_field("serviceInfo", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PingLiveDataServiceResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "service_info", + "serviceInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + ServiceInfo, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "serviceInfo" | "service_info" => Ok(GeneratedField::ServiceInfo), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PingLiveDataServiceResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.PingLiveDataServiceResponse") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut service_info__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::ServiceInfo => { + if service_info__.is_some() { + return Err(serde::de::Error::duplicate_field("serviceInfo")); + } + service_info__ = map.next_value()?; + } + } + } + Ok(PingLiveDataServiceResponse { + service_info: service_info__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.PingLiveDataServiceResponse", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for ServiceInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.address.is_some() { + len += 1; + } + if self.service_type.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.ServiceInfo", len)?; + if let Some(v) = self.address.as_ref() { + struct_ser.serialize_field("address", v)?; + } + if let Some(v) = self.service_type.as_ref() { + match v { + service_info::ServiceType::LiveDataServiceInfo(v) => { + struct_ser.serialize_field("liveDataServiceInfo", v)?; + } + service_info::ServiceType::HistoricalDataServiceInfo(v) => { + struct_ser.serialize_field("historicalDataServiceInfo", v)?; + } + service_info::ServiceType::FullNodeInfo(v) => { + struct_ser.serialize_field("fullNodeInfo", v)?; + } + } + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for ServiceInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "address", + "live_data_service_info", + "liveDataServiceInfo", + "historical_data_service_info", + "historicalDataServiceInfo", + "full_node_info", + "fullNodeInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Address, + LiveDataServiceInfo, + HistoricalDataServiceInfo, + FullNodeInfo, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "address" => Ok(GeneratedField::Address), + "liveDataServiceInfo" | "live_data_service_info" => Ok(GeneratedField::LiveDataServiceInfo), + "historicalDataServiceInfo" | "historical_data_service_info" => Ok(GeneratedField::HistoricalDataServiceInfo), + "fullNodeInfo" | "full_node_info" => Ok(GeneratedField::FullNodeInfo), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ServiceInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.ServiceInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut address__ = None; + let mut service_type__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::Address => { + if address__.is_some() { + return Err(serde::de::Error::duplicate_field("address")); + } + address__ = map.next_value()?; + } + GeneratedField::LiveDataServiceInfo => { + if service_type__.is_some() { + return Err(serde::de::Error::duplicate_field("liveDataServiceInfo")); + } + service_type__ = map.next_value::<::std::option::Option<_>>()?.map(service_info::ServiceType::LiveDataServiceInfo) +; + } + GeneratedField::HistoricalDataServiceInfo => { + if service_type__.is_some() { + return Err(serde::de::Error::duplicate_field("historicalDataServiceInfo")); + } + service_type__ = map.next_value::<::std::option::Option<_>>()?.map(service_info::ServiceType::HistoricalDataServiceInfo) +; + } + GeneratedField::FullNodeInfo => { + if service_type__.is_some() { + return Err(serde::de::Error::duplicate_field("fullNodeInfo")); + } + service_type__ = map.next_value::<::std::option::Option<_>>()?.map(service_info::ServiceType::FullNodeInfo) +; + } + } + } + Ok(ServiceInfo { + address: address__, + service_type: service_type__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.ServiceInfo", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for StreamInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.active_stream.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.StreamInfo", len)?; + if !self.active_stream.is_empty() { + struct_ser.serialize_field("activeStream", &self.active_stream)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for StreamInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "active_stream", + "activeStream", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + ActiveStream, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "activeStream" | "active_stream" => Ok(GeneratedField::ActiveStream), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = StreamInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.StreamInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut active_stream__ = None; + while let Some(k) = map.next_key()? 
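// A hedged construction sketch for the ServiceInfo oneof handled above:
// exactly one ServiceType variant is set at a time, and pbjson maps it to the
// "liveDataServiceInfo" / "historicalDataServiceInfo" / "fullNodeInfo" JSON
// keys seen in the impl. The module path and field types are assumed from
// the generated code.
use aptos_protos::indexer::v1::{
    service_info::ServiceType, LiveDataServiceInfo, ServiceInfo, StreamInfo,
};

fn live_service_info(address: String, known_latest_version: u64) -> ServiceInfo {
    ServiceInfo {
        address: Some(address),
        service_type: Some(ServiceType::LiveDataServiceInfo(LiveDataServiceInfo {
            // Timestamp omitted in this sketch; callers would stamp the
            // current time here.
            timestamp: None,
            known_latest_version: Some(known_latest_version),
            stream_info: Some(StreamInfo { active_stream: vec![] }),
        })),
    }
}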
{ + match k { + GeneratedField::ActiveStream => { + if active_stream__.is_some() { + return Err(serde::de::Error::duplicate_field("activeStream")); + } + active_stream__ = Some(map.next_value()?); + } + } + } + Ok(StreamInfo { + active_stream: active_stream__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.StreamInfo", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for TransactionsInStorage { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result diff --git a/protos/rust/src/pb/aptos.indexer.v1.tonic.rs b/protos/rust/src/pb/aptos.indexer.v1.tonic.rs index 3055af3b2a2e3..1b0c7b16f968c 100644 --- a/protos/rust/src/pb/aptos.indexer.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.indexer.v1.tonic.rs @@ -4,7 +4,13 @@ // @generated /// Generated client implementations. pub mod raw_data_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod raw_data_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod raw_data_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { RawDataClient::new(InterceptedService::new(inner, interceptor)) } @@ -101,8 +107,7 @@ pub mod raw_data_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -119,16 +124,22 @@ pub mod raw_data_client { } /// Generated server implementations. pub mod raw_data_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with RawDataServer. #[async_trait] - pub trait RawData: Send + Sync + 'static { + pub trait RawData: std::marker::Send + std::marker::Sync + 'static { /// Server streaming response type for the GetTransactions method. - type GetTransactionsStream: futures_core::Stream< + type GetTransactionsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; /** Get transactions batch without any filtering from starting version and end if transaction count is present. 
*/ @@ -142,20 +153,18 @@ pub mod raw_data_server { } /// #[derive(Debug)] - pub struct RawDataServer { - inner: _Inner, + pub struct RawDataServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl RawDataServer { + impl RawDataServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -205,8 +214,8 @@ pub mod raw_data_server { impl tonic::codegen::Service> for RawDataServer where T: RawData, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -218,7 +227,6 @@ pub mod raw_data_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/aptos.indexer.v1.RawData/GetTransactions" => { #[allow(non_camel_case_types)] @@ -240,7 +248,7 @@ pub mod raw_data_server { ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - (*inner).get_transactions(request).await + ::get_transactions(&inner, request).await }; Box::pin(fut) } @@ -251,7 +259,6 @@ pub mod raw_data_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTransactionsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -270,20 +277,25 @@ pub mod raw_data_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for RawDataServer { + impl Clone for RawDataServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -295,17 +307,1185 @@ pub mod raw_data_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.RawData"; + impl tonic::server::NamedService for RawDataServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod grpc_manager_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// + #[derive(Debug, Clone)] + pub struct GrpcManagerClient { + inner: tonic::client::Grpc, + } + impl GrpcManagerClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl GrpcManagerClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> GrpcManagerClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + GrpcManagerClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// + pub async fn heartbeat( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.indexer.v1.GrpcManager/Heartbeat", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("aptos.indexer.v1.GrpcManager", "Heartbeat")); + self.inner.unary(req, path, codec).await + } + /// + pub async fn get_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.indexer.v1.GrpcManager/GetTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("aptos.indexer.v1.GrpcManager", "GetTransactions"), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. 
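// A hedged usage sketch for the GrpcManagerClient generated above: both
// Heartbeat and the manager's GetTransactions are unary RPCs (contrast with
// the streaming RawData/LiveDataService variants). The endpoint URL and
// request values are placeholders, and the GetTransactionsRequest field
// names are assumed from the surrounding generated code.
use aptos_protos::indexer::v1::{
    grpc_manager_client::GrpcManagerClient, GetTransactionsRequest, HeartbeatRequest,
};

async fn manager_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = GrpcManagerClient::connect("http://localhost:50052").await?;

    // Heartbeat carries an optional ServiceInfo; omitted in this sketch.
    let hb = client.heartbeat(HeartbeatRequest::default()).await?;
    println!("known latest version: {:?}", hb.into_inner().known_latest_version);

    // One request, one TransactionsResponse; no server streaming here.
    let resp = client
        .get_transactions(GetTransactionsRequest {
            starting_version: Some(0),
            transactions_count: Some(100),
            ..Default::default()
        })
        .await?;
    println!("fetched {} transactions", resp.into_inner().transactions.len());
    Ok(())
}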
+pub mod grpc_manager_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with GrpcManagerServer. + #[async_trait] + pub trait GrpcManager: std::marker::Send + std::marker::Sync + 'static { + /// + async fn heartbeat( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// + async fn get_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// + #[derive(Debug)] + pub struct GrpcManagerServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl GrpcManagerServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self } } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) + impl tonic::codegen::Service> for GrpcManagerServer + where + T: GrpcManager, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/aptos.indexer.v1.GrpcManager/Heartbeat" => { + #[allow(non_camel_case_types)] + struct HeartbeatSvc(pub Arc); + impl< + T: GrpcManager, + > tonic::server::UnaryService + for HeartbeatSvc { + type Response = super::HeartbeatResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::heartbeat(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = HeartbeatSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/aptos.indexer.v1.GrpcManager/GetTransactions" => { + #[allow(non_camel_case_types)] + struct GetTransactionsSvc(pub Arc); + impl< + T: GrpcManager, + > tonic::server::UnaryService + for GetTransactionsSvc { + type Response = super::TransactionsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_transactions(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTransactionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + 
http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for GrpcManagerServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.GrpcManager"; + impl tonic::server::NamedService for GrpcManagerServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod live_data_service_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// + #[derive(Debug, Clone)] + pub struct LiveDataServiceClient { + inner: tonic::client::Grpc, + } + impl LiveDataServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl LiveDataServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> LiveDataServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + LiveDataServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// + pub async fn ping( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.indexer.v1.LiveDataService/Ping", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("aptos.indexer.v1.LiveDataService", "Ping")); + self.inner.unary(req, path, codec).await + } + /// + pub async fn get_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.indexer.v1.LiveDataService/GetTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "aptos.indexer.v1.LiveDataService", + "GetTransactions", + ), + ); + self.inner.server_streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod live_data_service_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with LiveDataServiceServer. + #[async_trait] + pub trait LiveDataService: std::marker::Send + std::marker::Sync + 'static { + /// + async fn ping( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the GetTransactions method. + type GetTransactionsStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// + async fn get_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// + #[derive(Debug)] + pub struct LiveDataServiceServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl LiveDataServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for LiveDataServiceServer + where + T: LiveDataService, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/aptos.indexer.v1.LiveDataService/Ping" => { + #[allow(non_camel_case_types)] + struct PingSvc(pub Arc); + impl< + T: LiveDataService, + > tonic::server::UnaryService + for PingSvc { + type Response = super::PingLiveDataServiceResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::ping(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = PingSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/aptos.indexer.v1.LiveDataService/GetTransactions" => { + #[allow(non_camel_case_types)] + struct GetTransactionsSvc(pub Arc); + impl< + T: LiveDataService, + > tonic::server::ServerStreamingService< + super::GetTransactionsRequest, + > for GetTransactionsSvc { + type Response = super::TransactionsResponse; + type ResponseStream = T::GetTransactionsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_transactions(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTransactionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + 
max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for LiveDataServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.LiveDataService"; + impl tonic::server::NamedService for LiveDataServiceServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod historical_data_service_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// + #[derive(Debug, Clone)] + pub struct HistoricalDataServiceClient { + inner: tonic::client::Grpc, + } + impl HistoricalDataServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl HistoricalDataServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> HistoricalDataServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + HistoricalDataServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
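On the server side, the interesting part of implementing LiveDataService is the GetTransactionsStream associated type; the common tonic pattern is a boxed stream backed by an mpsc channel. A sketch under the same crate-path assumption as above (the Ping request type name is inferred from the generated response type and is an assumption):

use std::pin::Pin;

use aptos_protos::indexer::v1::{
    live_data_service_server::LiveDataService, GetTransactionsRequest,
    PingLiveDataServiceRequest, PingLiveDataServiceResponse, TransactionsResponse,
};
use tokio_stream::{wrappers::ReceiverStream, Stream};
use tonic::{Request, Response, Status};

struct LiveDataServiceImpl;

#[tonic::async_trait]
impl LiveDataService for LiveDataServiceImpl {
    type GetTransactionsStream =
        Pin<Box<dyn Stream<Item = Result<TransactionsResponse, Status>> + Send>>;

    async fn ping(
        &self,
        _request: Request<PingLiveDataServiceRequest>,
    ) -> Result<Response<PingLiveDataServiceResponse>, Status> {
        Ok(Response::new(PingLiveDataServiceResponse::default()))
    }

    async fn get_transactions(
        &self,
        _request: Request<GetTransactionsRequest>,
    ) -> Result<Response<Self::GetTransactionsStream>, Status> {
        let (tx, rx) =
            tokio::sync::mpsc::channel::<Result<TransactionsResponse, Status>>(16);
        tokio::spawn(async move {
            // A real implementation would loop, sending batches until the
            // receiver is dropped (i.e. the client disconnects).
            let _ = tx.send(Ok(TransactionsResponse::default())).await;
        });
        Ok(Response::new(Box::pin(ReceiverStream::new(rx))))
    }
}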
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// + pub async fn ping( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.indexer.v1.HistoricalDataService/Ping", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("aptos.indexer.v1.HistoricalDataService", "Ping"), + ); + self.inner.unary(req, path, codec).await + } + /// + pub async fn get_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.indexer.v1.HistoricalDataService/GetTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "aptos.indexer.v1.HistoricalDataService", + "GetTransactions", + ), + ); + self.inner.server_streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod historical_data_service_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with HistoricalDataServiceServer. + #[async_trait] + pub trait HistoricalDataService: std::marker::Send + std::marker::Sync + 'static { + /// + async fn ping( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the GetTransactions method. + type GetTransactionsStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// + async fn get_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// + #[derive(Debug)] + pub struct HistoricalDataServiceServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl HistoricalDataServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> + for HistoricalDataServiceServer + where + T: HistoricalDataService, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/aptos.indexer.v1.HistoricalDataService/Ping" => { + #[allow(non_camel_case_types)] + struct PingSvc(pub Arc); + impl< + T: HistoricalDataService, + > tonic::server::UnaryService< + super::PingHistoricalDataServiceRequest, + > for PingSvc { + type Response = super::PingHistoricalDataServiceResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::PingHistoricalDataServiceRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::ping(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = PingSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/aptos.indexer.v1.HistoricalDataService/GetTransactions" => { + #[allow(non_camel_case_types)] + struct GetTransactionsSvc(pub Arc); + impl< + T: HistoricalDataService, + > tonic::server::ServerStreamingService< + super::GetTransactionsRequest, + > for GetTransactionsSvc { + type Response = super::TransactionsResponse; + type ResponseStream = T::GetTransactionsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_transactions( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTransactionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for HistoricalDataServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } } } - impl tonic::server::NamedService for RawDataServer { - const NAME: &'static str = "aptos.indexer.v1.RawData"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.HistoricalDataService"; + impl tonic::server::NamedService for HistoricalDataServiceServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/protos/rust/src/pb/aptos.internal.fullnode.v1.rs b/protos/rust/src/pb/aptos.internal.fullnode.v1.rs index 2a2fa4be3823c..2567f141fad8d 100644 --- a/protos/rust/src/pb/aptos.internal.fullnode.v1.rs +++ b/protos/rust/src/pb/aptos.internal.fullnode.v1.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // @generated +// This file is @generated by prost-build. // Transaction data is transferred via 1 stream with batches until terminated. // One stream consists: // StreamStatus: INIT with version x @@ -9,14 +10,12 @@ // TransactionOutput data(size n) // StreamStatus: BATCH_END with version x + (k + 1) * n - 1 -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionsOutput { #[prost(message, repeated, tag="1")] pub transactions: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct StreamStatus { #[prost(enumeration="stream_status::StatusType", tag="1")] pub r#type: i32, @@ -45,9 +44,9 @@ pub mod stream_status { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - StatusType::Unspecified => "STATUS_TYPE_UNSPECIFIED", - StatusType::Init => "STATUS_TYPE_INIT", - StatusType::BatchEnd => "STATUS_TYPE_BATCH_END", + Self::Unspecified => "STATUS_TYPE_UNSPECIFIED", + Self::Init => "STATUS_TYPE_INIT", + Self::BatchEnd => "STATUS_TYPE_BATCH_END", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -61,8 +60,7 @@ pub mod stream_status { } } } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetTransactionsFromNodeRequest { /// Required; start version of current stream. 
/// If not set will panic somewhere @@ -73,7 +71,6 @@ pub struct GetTransactionsFromNodeRequest { #[prost(uint64, optional, tag="2")] pub transactions_count: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionsFromNodeResponse { /// Making sure that all the responses include a chain id @@ -84,8 +81,7 @@ pub struct TransactionsFromNodeResponse { } /// Nested message and enum types in `TransactionsFromNodeResponse`. pub mod transactions_from_node_response { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Response { #[prost(message, tag="1")] Status(super::StreamStatus), diff --git a/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs b/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs index e95301b991017..e5ad50187619f 100644 --- a/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs @@ -4,7 +4,13 @@ // @generated /// Generated client implementations. pub mod fullnode_data_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod fullnode_data_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod fullnode_data_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { FullnodeDataClient::new(InterceptedService::new(inner, interceptor)) } @@ -102,8 +108,7 @@ pub mod fullnode_data_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -125,19 +130,25 @@ pub mod fullnode_data_client { } /// Generated server implementations. pub mod fullnode_data_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with FullnodeDataServer. #[async_trait] - pub trait FullnodeData: Send + Sync + 'static { + pub trait FullnodeData: std::marker::Send + std::marker::Sync + 'static { /// Server streaming response type for the GetTransactionsFromNode method. 
- type GetTransactionsFromNodeStream: futures_core::Stream< + type GetTransactionsFromNodeStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< super::TransactionsFromNodeResponse, tonic::Status, >, > - + Send + + std::marker::Send + 'static; /// async fn get_transactions_from_node( @@ -150,20 +161,18 @@ pub mod fullnode_data_server { } /// #[derive(Debug)] - pub struct FullnodeDataServer { - inner: _Inner, + pub struct FullnodeDataServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl FullnodeDataServer { + impl FullnodeDataServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -213,8 +222,8 @@ pub mod fullnode_data_server { impl tonic::codegen::Service> for FullnodeDataServer where T: FullnodeData, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -226,7 +235,6 @@ pub mod fullnode_data_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/aptos.internal.fullnode.v1.FullnodeData/GetTransactionsFromNode" => { #[allow(non_camel_case_types)] @@ -250,7 +258,11 @@ pub mod fullnode_data_server { ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - (*inner).get_transactions_from_node(request).await + ::get_transactions_from_node( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -261,7 +273,6 @@ pub mod fullnode_data_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTransactionsFromNodeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -280,20 +291,25 @@ pub mod fullnode_data_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for FullnodeDataServer { + impl Clone for FullnodeDataServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -305,17 +321,9 @@ pub mod fullnode_data_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for FullnodeDataServer { - const NAME: &'static str = "aptos.internal.fullnode.v1.FullnodeData"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.internal.fullnode.v1.FullnodeData"; + impl tonic::server::NamedService for FullnodeDataServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git 
a/protos/rust/src/pb/aptos.remote_executor.v1.rs b/protos/rust/src/pb/aptos.remote_executor.v1.rs index dcf6074bfbf9e..29daad3efd968 100644 --- a/protos/rust/src/pb/aptos.remote_executor.v1.rs +++ b/protos/rust/src/pb/aptos.remote_executor.v1.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // @generated -#[allow(clippy::derive_partial_eq_without_eq)] +// This file is @generated by prost-build. #[derive(Clone, PartialEq, ::prost::Message)] pub struct NetworkMessage { #[prost(bytes="vec", tag="1")] @@ -10,8 +10,7 @@ pub struct NetworkMessage { #[prost(string, tag="2")] pub message_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Empty { } /// Encoded file descriptor set for the `aptos.remote_executor.v1` package diff --git a/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs b/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs index 84cfa7776d314..85f08bf9e8caa 100644 --- a/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs @@ -4,7 +4,13 @@ // @generated /// Generated client implementations. pub mod network_message_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod network_message_service_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod network_message_service_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { NetworkMessageServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -97,8 +103,7 @@ pub mod network_message_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -120,11 +125,17 @@ pub mod network_message_service_client { } /// Generated server implementations. pub mod network_message_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with NetworkMessageServiceServer. 
#[async_trait] - pub trait NetworkMessageService: Send + Sync + 'static { + pub trait NetworkMessageService: std::marker::Send + std::marker::Sync + 'static { /// async fn simple_msg_exchange( &self, @@ -133,20 +144,18 @@ pub mod network_message_service_server { } /// #[derive(Debug)] - pub struct NetworkMessageServiceServer { - inner: _Inner, + pub struct NetworkMessageServiceServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl NetworkMessageServiceServer { + impl NetworkMessageServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -197,8 +206,8 @@ pub mod network_message_service_server { for NetworkMessageServiceServer where T: NetworkMessageService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -210,7 +219,6 @@ pub mod network_message_service_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/aptos.remote_executor.v1.NetworkMessageService/SimpleMsgExchange" => { #[allow(non_camel_case_types)] @@ -230,7 +238,11 @@ pub mod network_message_service_server { ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - (*inner).simple_msg_exchange(request).await + ::simple_msg_exchange( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -241,7 +253,6 @@ pub mod network_message_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SimpleMsgExchangeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -260,20 +271,25 @@ pub mod network_message_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for NetworkMessageServiceServer { + impl Clone for NetworkMessageServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -285,18 +301,9 @@ pub mod network_message_service_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService - for NetworkMessageServiceServer { - const NAME: &'static str = "aptos.remote_executor.v1.NetworkMessageService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.remote_executor.v1.NetworkMessageService"; + impl tonic::server::NamedService for NetworkMessageServiceServer { + const NAME: &'static str = SERVICE_NAME; 
} } diff --git a/protos/rust/src/pb/aptos.transaction.v1.rs b/protos/rust/src/pb/aptos.transaction.v1.rs index 1cd72373d592d..8e9ffcceb7d51 100644 --- a/protos/rust/src/pb/aptos.transaction.v1.rs +++ b/protos/rust/src/pb/aptos.transaction.v1.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // @generated +// This file is @generated by prost-build. /// A block on Aptos holds transactions in chronological order (ordered by a transactions monotonically increasing `version` field) /// All blocks start with a `BlockMetadataTransaction`, and are followed by zero or more transactions. /// The next `BlockMetadataTransaction` denotes the end of the current block, and the start of the next one. @@ -11,7 +12,6 @@ /// the same `height`. /// /// The Genesis Transaction (version 0) is contained within the first block, which has a height of `0` -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Block { /// Timestamp represents the timestamp of the `BlockMetadataTransaction` (or `GenesisTransaction` for the genesis block) @@ -34,7 +34,6 @@ pub struct Block { /// - Block Metadata Transaction: transactions generated by the chain to group together transactions forming a "block" /// - Block Epilogue / State Checkpoint Transaction: transactions generated by the chain to end the group transactions forming a bloc /// - Genesis Transaction: the first transaction of the chain, with all core contract and validator information baked in -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Transaction { #[prost(message, optional, tag="1")] @@ -75,13 +74,13 @@ pub mod transaction { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - TransactionType::Unspecified => "TRANSACTION_TYPE_UNSPECIFIED", - TransactionType::Genesis => "TRANSACTION_TYPE_GENESIS", - TransactionType::BlockMetadata => "TRANSACTION_TYPE_BLOCK_METADATA", - TransactionType::StateCheckpoint => "TRANSACTION_TYPE_STATE_CHECKPOINT", - TransactionType::User => "TRANSACTION_TYPE_USER", - TransactionType::Validator => "TRANSACTION_TYPE_VALIDATOR", - TransactionType::BlockEpilogue => "TRANSACTION_TYPE_BLOCK_EPILOGUE", + Self::Unspecified => "TRANSACTION_TYPE_UNSPECIFIED", + Self::Genesis => "TRANSACTION_TYPE_GENESIS", + Self::BlockMetadata => "TRANSACTION_TYPE_BLOCK_METADATA", + Self::StateCheckpoint => "TRANSACTION_TYPE_STATE_CHECKPOINT", + Self::User => "TRANSACTION_TYPE_USER", + Self::Validator => "TRANSACTION_TYPE_VALIDATOR", + Self::BlockEpilogue => "TRANSACTION_TYPE_BLOCK_EPILOGUE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -98,8 +97,7 @@ pub mod transaction { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum TxnData { #[prost(message, tag="7")] BlockMetadata(super::BlockMetadataTransaction), @@ -118,7 +116,6 @@ pub mod transaction { } } /// Transaction types. 
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockMetadataTransaction { #[prost(string, tag="1")] @@ -134,7 +131,6 @@ pub struct BlockMetadataTransaction { #[prost(uint32, repeated, tag="6")] pub failed_proposer_indices: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GenesisTransaction { #[prost(message, optional, tag="1")] @@ -142,11 +138,9 @@ pub struct GenesisTransaction { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct StateCheckpointTransaction { } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorTransaction { #[prost(message, repeated, tag="3")] @@ -156,16 +150,14 @@ pub struct ValidatorTransaction { } /// Nested message and enum types in `ValidatorTransaction`. pub mod validator_transaction { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ObservedJwkUpdate { #[prost(message, optional, tag="1")] pub quorum_certified_update: ::core::option::Option, } /// Nested message and enum types in `ObservedJwkUpdate`. pub mod observed_jwk_update { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportedProviderJwKs { #[prost(string, tag="1")] pub issuer: ::prost::alloc::string::String, @@ -176,16 +168,14 @@ pub mod validator_transaction { } /// Nested message and enum types in `ExportedProviderJWKs`. pub mod exported_provider_jw_ks { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct Jwk { #[prost(oneof="jwk::JwkType", tags="1, 2")] pub jwk_type: ::core::option::Option, } /// Nested message and enum types in `JWK`. 
pub mod jwk { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct Rsa { #[prost(string, tag="1")] pub kid: ::prost::alloc::string::String, @@ -198,16 +188,14 @@ pub mod validator_transaction { #[prost(string, tag="5")] pub n: ::prost::alloc::string::String, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct UnsupportedJwk { #[prost(bytes="vec", tag="1")] pub id: ::prost::alloc::vec::Vec, #[prost(bytes="vec", tag="2")] pub payload: ::prost::alloc::vec::Vec, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum JwkType { #[prost(message, tag="1")] UnsupportedJwk(UnsupportedJwk), @@ -216,8 +204,7 @@ pub mod validator_transaction { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportedAggregateSignature { #[prost(uint64, repeated, tag="1")] pub signer_indices: ::prost::alloc::vec::Vec, @@ -225,8 +212,7 @@ pub mod validator_transaction { #[prost(bytes="vec", tag="2")] pub sig: ::prost::alloc::vec::Vec, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct QuorumCertifiedUpdate { #[prost(message, optional, tag="1")] pub update: ::core::option::Option, @@ -234,16 +220,14 @@ pub mod validator_transaction { pub multi_sig: ::core::option::Option, } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct DkgUpdate { #[prost(message, optional, tag="1")] pub dkg_transcript: ::core::option::Option, } /// Nested message and enum types in `DkgUpdate`. 
pub mod dkg_update { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct DkgTranscript { #[prost(uint64, tag="1")] pub epoch: u64, @@ -253,8 +237,7 @@ pub mod validator_transaction { pub payload: ::prost::alloc::vec::Vec, } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum ValidatorTransactionType { #[prost(message, tag="1")] ObservedJwkUpdate(ObservedJwkUpdate), @@ -262,14 +245,12 @@ pub mod validator_transaction { DkgUpdate(DkgUpdate), } } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct BlockEpilogueTransaction { #[prost(message, optional, tag="1")] pub block_end_info: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct BlockEndInfo { #[prost(bool, tag="1")] pub block_gas_limit_reached: bool, @@ -280,7 +261,6 @@ pub struct BlockEndInfo { #[prost(uint64, tag="4")] pub block_approx_output_size: u64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserTransaction { #[prost(message, optional, tag="1")] @@ -288,7 +268,6 @@ pub struct UserTransaction { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Event { #[prost(message, optional, tag="1")] @@ -302,7 +281,6 @@ pub struct Event { #[prost(string, tag="4")] pub data: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionInfo { #[prost(bytes="vec", tag="1")] @@ -324,7 +302,6 @@ pub struct TransactionInfo { #[prost(message, repeated, tag="9")] pub changes: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EventKey { #[prost(uint64, tag="1")] @@ -332,7 +309,6 @@ pub struct EventKey { #[prost(string, tag="2")] pub account_address: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserTransactionRequest { #[prost(string, tag="1")] @@ -350,7 +326,6 @@ pub struct UserTransactionRequest { #[prost(message, optional, tag="7")] pub signature: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteSet { #[prost(enumeration="write_set::WriteSetType", tag="1")] @@ -374,9 +349,9 @@ pub mod write_set { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - WriteSetType::Unspecified => "WRITE_SET_TYPE_UNSPECIFIED", - WriteSetType::ScriptWriteSet => "WRITE_SET_TYPE_SCRIPT_WRITE_SET", - WriteSetType::DirectWriteSet => "WRITE_SET_TYPE_DIRECT_WRITE_SET", + Self::Unspecified => "WRITE_SET_TYPE_UNSPECIFIED", + Self::ScriptWriteSet => "WRITE_SET_TYPE_SCRIPT_WRITE_SET", + Self::DirectWriteSet => "WRITE_SET_TYPE_DIRECT_WRITE_SET", } } /// Creates an enum from field names used in the ProtoBuf definition. 
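The as_str_name hunks in this file only swap the enum name for Self; the generated helpers behave exactly as before. For reference, they round-trip like this (crate path assumed):

use aptos_protos::transaction::v1::write_set;

fn demo() {
    let ty = write_set::WriteSetType::DirectWriteSet;
    assert_eq!(ty.as_str_name(), "WRITE_SET_TYPE_DIRECT_WRITE_SET");
    // from_str_name returns None for unknown names rather than panicking.
    assert_eq!(
        write_set::WriteSetType::from_str_name("WRITE_SET_TYPE_DIRECT_WRITE_SET"),
        Some(write_set::WriteSetType::DirectWriteSet),
    );
}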
@@ -389,8 +364,7 @@ pub mod write_set { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum WriteSet { #[prost(message, tag="2")] ScriptWriteSet(super::ScriptWriteSet), @@ -398,7 +372,6 @@ pub mod write_set { DirectWriteSet(super::DirectWriteSet), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScriptWriteSet { #[prost(string, tag="1")] @@ -406,7 +379,6 @@ pub struct ScriptWriteSet { #[prost(message, optional, tag="2")] pub script: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DirectWriteSet { #[prost(message, repeated, tag="1")] @@ -414,7 +386,6 @@ pub struct DirectWriteSet { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteSetChange { #[prost(enumeration="write_set_change::Type", tag="1")] @@ -442,13 +413,13 @@ pub mod write_set_change { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::DeleteModule => "TYPE_DELETE_MODULE", - Type::DeleteResource => "TYPE_DELETE_RESOURCE", - Type::DeleteTableItem => "TYPE_DELETE_TABLE_ITEM", - Type::WriteModule => "TYPE_WRITE_MODULE", - Type::WriteResource => "TYPE_WRITE_RESOURCE", - Type::WriteTableItem => "TYPE_WRITE_TABLE_ITEM", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::DeleteModule => "TYPE_DELETE_MODULE", + Self::DeleteResource => "TYPE_DELETE_RESOURCE", + Self::DeleteTableItem => "TYPE_DELETE_TABLE_ITEM", + Self::WriteModule => "TYPE_WRITE_MODULE", + Self::WriteResource => "TYPE_WRITE_RESOURCE", + Self::WriteTableItem => "TYPE_WRITE_TABLE_ITEM", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -465,8 +436,7 @@ pub mod write_set_change { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Change { #[prost(message, tag="2")] DeleteModule(super::DeleteModule), @@ -482,7 +452,6 @@ pub mod write_set_change { WriteTableItem(super::WriteTableItem), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteModule { #[prost(string, tag="1")] @@ -492,7 +461,6 @@ pub struct DeleteModule { #[prost(message, optional, tag="3")] pub module: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteResource { #[prost(string, tag="1")] @@ -504,7 +472,6 @@ pub struct DeleteResource { #[prost(string, tag="4")] pub type_str: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteTableItem { #[prost(bytes="vec", tag="1")] @@ -516,7 +483,6 @@ pub struct DeleteTableItem { #[prost(message, optional, tag="4")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteTableData { #[prost(string, tag="1")] @@ -524,7 +490,6 @@ pub struct DeleteTableData { #[prost(string, tag="2")] pub key_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteModule { #[prost(string, tag="1")] @@ -534,7 +499,6 @@ pub struct WriteModule { #[prost(message, optional, tag="3")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteResource { #[prost(string, tag="1")] @@ -548,7 +512,6 @@ pub struct WriteResource { #[prost(string, tag="5")] pub data: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteTableData { #[prost(string, tag="1")] @@ -560,7 +523,6 @@ pub struct WriteTableData { #[prost(string, tag="4")] pub value_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteTableItem { #[prost(bytes="vec", tag="1")] @@ -572,7 +534,6 @@ pub struct WriteTableItem { #[prost(message, optional, tag="4")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionPayload { #[prost(enumeration="transaction_payload::Type", tag="1")] @@ -598,11 +559,11 @@ pub mod transaction_payload { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", - Type::ScriptPayload => "TYPE_SCRIPT_PAYLOAD", - Type::WriteSetPayload => "TYPE_WRITE_SET_PAYLOAD", - Type::MultisigPayload => "TYPE_MULTISIG_PAYLOAD", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", + Self::ScriptPayload => "TYPE_SCRIPT_PAYLOAD", + Self::WriteSetPayload => "TYPE_WRITE_SET_PAYLOAD", + Self::MultisigPayload => "TYPE_MULTISIG_PAYLOAD", } } /// Creates an enum from field names used in the ProtoBuf definition. 
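The oneof enums reformatted across these hunks (TxnData, Payload, Change, and friends) are consumed by matching their variants; prost wraps each oneof field in an Option. A sketch, assuming the Transaction field is named txn_data as in earlier releases of these protos:

use aptos_protos::transaction::v1::{transaction::TxnData, Transaction};

fn is_user_transaction(txn: &Transaction) -> bool {
    matches!(txn.txn_data, Some(TxnData::User(_)))
}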
@@ -617,8 +578,7 @@ pub mod transaction_payload { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Payload { #[prost(message, tag="2")] EntryFunctionPayload(super::EntryFunctionPayload), @@ -630,7 +590,6 @@ pub mod transaction_payload { MultisigPayload(super::MultisigPayload), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EntryFunctionPayload { #[prost(message, optional, tag="1")] @@ -642,7 +601,6 @@ pub struct EntryFunctionPayload { #[prost(string, tag="4")] pub entry_function_id_str: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveScriptBytecode { #[prost(bytes="vec", tag="1")] @@ -650,7 +608,6 @@ pub struct MoveScriptBytecode { #[prost(message, optional, tag="2")] pub abi: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScriptPayload { #[prost(message, optional, tag="1")] @@ -660,7 +617,6 @@ pub struct ScriptPayload { #[prost(string, repeated, tag="3")] pub arguments: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultisigPayload { #[prost(string, tag="1")] @@ -668,7 +624,6 @@ pub struct MultisigPayload { #[prost(message, optional, tag="2")] pub transaction_payload: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultisigTransactionPayload { #[prost(enumeration="multisig_transaction_payload::Type", tag="1")] @@ -691,8 +646,8 @@ pub mod multisig_transaction_payload { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -704,14 +659,12 @@ pub mod multisig_transaction_payload { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Payload { #[prost(message, tag="2")] EntryFunctionPayload(super::EntryFunctionPayload), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveModuleBytecode { #[prost(bytes="vec", tag="1")] @@ -719,7 +672,6 @@ pub struct MoveModuleBytecode { #[prost(message, optional, tag="2")] pub abi: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveModule { #[prost(string, tag="1")] @@ -733,7 +685,6 @@ pub struct MoveModule { #[prost(message, repeated, tag="5")] pub structs: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveFunction { #[prost(string, tag="1")] @@ -766,10 +717,10 @@ pub mod move_function { /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str { match self { - Visibility::Unspecified => "VISIBILITY_UNSPECIFIED", - Visibility::Private => "VISIBILITY_PRIVATE", - Visibility::Public => "VISIBILITY_PUBLIC", - Visibility::Friend => "VISIBILITY_FRIEND", + Self::Unspecified => "VISIBILITY_UNSPECIFIED", + Self::Private => "VISIBILITY_PRIVATE", + Self::Public => "VISIBILITY_PUBLIC", + Self::Friend => "VISIBILITY_FRIEND", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -784,7 +735,6 @@ pub mod move_function { } } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStruct { #[prost(string, tag="1")] @@ -800,7 +750,6 @@ pub struct MoveStruct { #[prost(message, repeated, tag="5")] pub fields: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStructGenericTypeParam { #[prost(enumeration="MoveAbility", repeated, tag="1")] @@ -808,7 +757,6 @@ pub struct MoveStructGenericTypeParam { #[prost(bool, tag="2")] pub is_phantom: bool, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStructField { #[prost(string, tag="1")] @@ -816,13 +764,11 @@ pub struct MoveStructField { #[prost(message, optional, tag="2")] pub r#type: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveFunctionGenericTypeParam { #[prost(enumeration="MoveAbility", repeated, tag="1")] pub constraints: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveType { #[prost(enumeration="MoveTypes", tag="1")] @@ -832,16 +778,14 @@ pub struct MoveType { } /// Nested message and enum types in `MoveType`. 
 pub mod move_type {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
     pub struct ReferenceType {
         #[prost(bool, tag="1")]
         pub mutable: bool,
         #[prost(message, optional, boxed, tag="2")]
         pub to: ::core::option::Option<::prost::alloc::boxed::Box<super::MoveType>>,
     }
-    #[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Oneof)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
     pub enum Content {
         #[prost(message, tag="3")]
         Vector(::prost::alloc::boxed::Box<super::MoveType>),
@@ -855,13 +799,11 @@ pub mod move_type {
         Unparsable(::prost::alloc::string::String),
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct WriteSetPayload {
     #[prost(message, optional, tag="1")]
     pub write_set: ::core::option::Option<WriteSet>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct EntryFunctionId {
     #[prost(message, optional, tag="1")]
@@ -869,7 +811,6 @@ pub struct EntryFunctionId {
     #[prost(string, tag="2")]
     pub name: ::prost::alloc::string::String,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MoveModuleId {
     #[prost(string, tag="1")]
@@ -877,7 +818,6 @@ pub struct MoveModuleId {
     #[prost(string, tag="2")]
     pub name: ::prost::alloc::string::String,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MoveStructTag {
     #[prost(string, tag="1")]
@@ -889,7 +829,6 @@ pub struct MoveStructTag {
     #[prost(message, repeated, tag="4")]
     pub generic_type_params: ::prost::alloc::vec::Vec<MoveType>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Signature {
     #[prost(enumeration="signature::Type", tag="1")]
@@ -916,12 +855,12 @@ pub mod signature {
         /// (if the ProtoBuf definition does not change) and safe for programmatic use.
         pub fn as_str_name(&self) -> &'static str {
             match self {
-                Type::Unspecified => "TYPE_UNSPECIFIED",
-                Type::Ed25519 => "TYPE_ED25519",
-                Type::MultiEd25519 => "TYPE_MULTI_ED25519",
-                Type::MultiAgent => "TYPE_MULTI_AGENT",
-                Type::FeePayer => "TYPE_FEE_PAYER",
-                Type::SingleSender => "TYPE_SINGLE_SENDER",
+                Self::Unspecified => "TYPE_UNSPECIFIED",
+                Self::Ed25519 => "TYPE_ED25519",
+                Self::MultiEd25519 => "TYPE_MULTI_ED25519",
+                Self::MultiAgent => "TYPE_MULTI_AGENT",
+                Self::FeePayer => "TYPE_FEE_PAYER",
+                Self::SingleSender => "TYPE_SINGLE_SENDER",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -937,8 +876,7 @@ pub mod signature {
             }
         }
     }
-    #[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Oneof)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
     pub enum Signature {
         #[prost(message, tag="2")]
         Ed25519(super::Ed25519Signature),
@@ -953,7 +891,6 @@ pub mod signature {
         SingleSender(super::SingleSender),
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Ed25519Signature {
     #[prost(bytes="vec", tag="1")]
@@ -961,7 +898,6 @@ pub struct Ed25519Signature {
     #[prost(bytes="vec", tag="2")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MultiEd25519Signature {
     #[prost(bytes="vec", repeated, tag="1")]
@@ -973,7 +909,6 @@ pub struct MultiEd25519Signature {
     #[prost(uint32, repeated, tag="4")]
     pub public_key_indices: ::prost::alloc::vec::Vec<u32>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MultiAgentSignature {
     #[prost(message, optional, tag="1")]
@@ -983,7 +918,6 @@ pub struct MultiAgentSignature {
     #[prost(message, repeated, tag="3")]
     pub secondary_signers: ::prost::alloc::vec::Vec<AccountSignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct FeePayerSignature {
     #[prost(message, optional, tag="1")]
@@ -997,7 +931,6 @@ pub struct FeePayerSignature {
     #[prost(message, optional, tag="5")]
     pub fee_payer_signer: ::core::option::Option<AccountSignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct AnyPublicKey {
     #[prost(enumeration="any_public_key::Type", tag="1")]
@@ -1024,12 +957,12 @@ pub mod any_public_key {
         /// (if the ProtoBuf definition does not change) and safe for programmatic use.
         pub fn as_str_name(&self) -> &'static str {
             match self {
-                Type::Unspecified => "TYPE_UNSPECIFIED",
-                Type::Ed25519 => "TYPE_ED25519",
-                Type::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA",
-                Type::Secp256r1Ecdsa => "TYPE_SECP256R1_ECDSA",
-                Type::Keyless => "TYPE_KEYLESS",
-                Type::FederatedKeyless => "TYPE_FEDERATED_KEYLESS",
+                Self::Unspecified => "TYPE_UNSPECIFIED",
+                Self::Ed25519 => "TYPE_ED25519",
+                Self::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA",
+                Self::Secp256r1Ecdsa => "TYPE_SECP256R1_ECDSA",
+                Self::Keyless => "TYPE_KEYLESS",
+                Self::FederatedKeyless => "TYPE_FEDERATED_KEYLESS",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -1046,7 +979,6 @@ pub mod any_public_key {
         }
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct AnySignature {
     #[prost(enumeration="any_signature::Type", tag="1")]
@@ -1078,11 +1010,11 @@ pub mod any_signature {
         /// (if the ProtoBuf definition does not change) and safe for programmatic use.
         pub fn as_str_name(&self) -> &'static str {
             match self {
-                Type::Unspecified => "TYPE_UNSPECIFIED",
-                Type::Ed25519 => "TYPE_ED25519",
-                Type::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA",
-                Type::Webauthn => "TYPE_WEBAUTHN",
-                Type::Keyless => "TYPE_KEYLESS",
+                Self::Unspecified => "TYPE_UNSPECIFIED",
+                Self::Ed25519 => "TYPE_ED25519",
+                Self::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA",
+                Self::Webauthn => "TYPE_WEBAUTHN",
+                Self::Keyless => "TYPE_KEYLESS",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -1098,8 +1030,7 @@ pub mod any_signature {
         }
     }
     /// Support: >= 1.10.
-    #[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Oneof)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
     pub enum SignatureVariant {
         #[prost(message, tag="3")]
         Ed25519(super::Ed25519),
@@ -1111,31 +1042,26 @@ pub mod any_signature {
         Keyless(super::Keyless),
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Ed25519 {
     #[prost(bytes="vec", tag="1")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Secp256k1Ecdsa {
     #[prost(bytes="vec", tag="1")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct WebAuthn {
     #[prost(bytes="vec", tag="1")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Keyless {
     #[prost(bytes="vec", tag="1")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct SingleKeySignature {
     #[prost(message, optional, tag="1")]
@@ -1143,7 +1069,6 @@ pub struct SingleKeySignature {
     #[prost(message, optional, tag="2")]
     pub signature: ::core::option::Option<AnySignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct IndexedSignature {
     #[prost(uint32, tag="1")]
@@ -1151,7 +1076,6 @@ pub struct IndexedSignature {
     #[prost(message, optional, tag="2")]
     pub signature: ::core::option::Option<AccountSignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MultiKeySignature {
     #[prost(message, repeated, tag="1")]
@@ -1161,13 +1085,11 @@ pub struct MultiKeySignature {
     #[prost(uint32, tag="3")]
     pub signatures_required: u32,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct SingleSender {
     #[prost(message, optional, tag="1")]
     pub sender: ::core::option::Option<AccountSignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct AccountSignature {
     #[prost(enumeration="account_signature::Type", tag="1")]
@@ -1193,11 +1115,11 @@ pub mod account_signature {
         /// (if the ProtoBuf definition does not change) and safe for programmatic use.
         pub fn as_str_name(&self) -> &'static str {
             match self {
-                Type::Unspecified => "TYPE_UNSPECIFIED",
-                Type::Ed25519 => "TYPE_ED25519",
-                Type::MultiEd25519 => "TYPE_MULTI_ED25519",
-                Type::SingleKey => "TYPE_SINGLE_KEY",
-                Type::MultiKey => "TYPE_MULTI_KEY",
+                Self::Unspecified => "TYPE_UNSPECIFIED",
+                Self::Ed25519 => "TYPE_ED25519",
+                Self::MultiEd25519 => "TYPE_MULTI_ED25519",
+                Self::SingleKey => "TYPE_SINGLE_KEY",
+                Self::MultiKey => "TYPE_MULTI_KEY",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -1212,8 +1134,7 @@ pub mod account_signature {
             }
         }
     }
-    #[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Oneof)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
     pub enum Signature {
         #[prost(message, tag="2")]
         Ed25519(super::Ed25519Signature),
@@ -1226,7 +1147,6 @@ pub mod account_signature {
         MultiKeySignature(super::MultiKeySignature),
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct TransactionSizeInfo {
     #[prost(uint32, tag="1")]
@@ -1236,16 +1156,14 @@ pub struct TransactionSizeInfo {
     #[prost(message, repeated, tag="3")]
     pub write_op_size_info: ::prost::alloc::vec::Vec<WriteOpSizeInfo>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct EventSizeInfo {
     #[prost(uint32, tag="1")]
     pub type_tag_bytes: u32,
     #[prost(uint32, tag="2")]
     pub total_bytes: u32,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct WriteOpSizeInfo {
     #[prost(uint32, tag="1")]
     pub key_bytes: u32,
@@ -1283,21 +1201,21 @@ impl MoveTypes {
     /// (if the ProtoBuf definition does not change) and safe for programmatic use.
     pub fn as_str_name(&self) -> &'static str {
         match self {
-            MoveTypes::Unspecified => "MOVE_TYPES_UNSPECIFIED",
-            MoveTypes::Bool => "MOVE_TYPES_BOOL",
-            MoveTypes::U8 => "MOVE_TYPES_U8",
-            MoveTypes::U16 => "MOVE_TYPES_U16",
-            MoveTypes::U32 => "MOVE_TYPES_U32",
-            MoveTypes::U64 => "MOVE_TYPES_U64",
-            MoveTypes::U128 => "MOVE_TYPES_U128",
-            MoveTypes::U256 => "MOVE_TYPES_U256",
-            MoveTypes::Address => "MOVE_TYPES_ADDRESS",
-            MoveTypes::Signer => "MOVE_TYPES_SIGNER",
-            MoveTypes::Vector => "MOVE_TYPES_VECTOR",
-            MoveTypes::Struct => "MOVE_TYPES_STRUCT",
-            MoveTypes::GenericTypeParam => "MOVE_TYPES_GENERIC_TYPE_PARAM",
-            MoveTypes::Reference => "MOVE_TYPES_REFERENCE",
-            MoveTypes::Unparsable => "MOVE_TYPES_UNPARSABLE",
+            Self::Unspecified => "MOVE_TYPES_UNSPECIFIED",
+            Self::Bool => "MOVE_TYPES_BOOL",
+            Self::U8 => "MOVE_TYPES_U8",
+            Self::U16 => "MOVE_TYPES_U16",
+            Self::U32 => "MOVE_TYPES_U32",
+            Self::U64 => "MOVE_TYPES_U64",
+            Self::U128 => "MOVE_TYPES_U128",
+            Self::U256 => "MOVE_TYPES_U256",
+            Self::Address => "MOVE_TYPES_ADDRESS",
+            Self::Signer => "MOVE_TYPES_SIGNER",
+            Self::Vector => "MOVE_TYPES_VECTOR",
+            Self::Struct => "MOVE_TYPES_STRUCT",
+            Self::GenericTypeParam => "MOVE_TYPES_GENERIC_TYPE_PARAM",
+            Self::Reference => "MOVE_TYPES_REFERENCE",
+            Self::Unparsable => "MOVE_TYPES_UNPARSABLE",
         }
     }
     /// Creates an enum from field names used in the ProtoBuf definition.
@@ -1338,11 +1256,11 @@ impl MoveAbility {
     /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
-            MoveAbility::Unspecified => "MOVE_ABILITY_UNSPECIFIED",
-            MoveAbility::Copy => "MOVE_ABILITY_COPY",
-            MoveAbility::Drop => "MOVE_ABILITY_DROP",
-            MoveAbility::Store => "MOVE_ABILITY_STORE",
-            MoveAbility::Key => "MOVE_ABILITY_KEY",
+            Self::Unspecified => "MOVE_ABILITY_UNSPECIFIED",
+            Self::Copy => "MOVE_ABILITY_COPY",
+            Self::Drop => "MOVE_ABILITY_DROP",
+            Self::Store => "MOVE_ABILITY_STORE",
+            Self::Key => "MOVE_ABILITY_KEY",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
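
The churn above in aptos.transaction.v1.rs is mechanical output of regenerating the bindings with prost 0.13: the `#[allow(clippy::derive_partial_eq_without_eq)]` attributes are dropped, the generated enum helpers match on `Self::` instead of the enum's own name, and all-scalar messages such as EventSizeInfo and WriteOpSizeInfo additionally derive `Copy`. Runtime behavior is unchanged for downstream crates. A minimal sketch of what callers can continue to rely on (the `aptos_protos` import path is assumed here for illustration):

    use aptos_protos::transaction::v1::{move_function::Visibility, EventSizeInfo};

    fn demo() {
        // as_str_name()/from_str_name() are prost-generated helpers; the
        // Type:: -> Self:: rewrite inside their match arms does not change
        // their output.
        assert_eq!(Visibility::Public.as_str_name(), "VISIBILITY_PUBLIC");
        assert_eq!(
            Visibility::from_str_name("VISIBILITY_PUBLIC"),
            Some(Visibility::Public)
        );

        // EventSizeInfo now derives Copy (both fields are u32), so plain
        // assignment duplicates the value instead of moving it.
        let info = EventSizeInfo { type_tag_bytes: 8, total_bytes: 64 };
        let copied = info;
        assert_eq!(copied, info);
    }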
diff --git a/protos/rust/src/pb/aptos.util.timestamp.rs b/protos/rust/src/pb/aptos.util.timestamp.rs
index ec95fda8d3d21..f746dba3fd7b1 100644
--- a/protos/rust/src/pb/aptos.util.timestamp.rs
+++ b/protos/rust/src/pb/aptos.util.timestamp.rs
@@ -2,8 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 
 // @generated
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+// This file is @generated by prost-build.
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct Timestamp {
     /// Represents seconds of UTC time since Unix epoch
     /// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
diff --git a/protos/typescript/src/index.aptos.indexer.v1.ts b/protos/typescript/src/index.aptos.indexer.v1.ts
index b3dd8fc7ceb72..db6b45c6ddf74 100644
--- a/protos/typescript/src/index.aptos.indexer.v1.ts
+++ b/protos/typescript/src/index.aptos.indexer.v1.ts
@@ -1,3 +1,4 @@
 /* eslint-disable */
 export * from "./aptos/indexer/v1/raw_data";
+export * from "./aptos/indexer/v1/grpc";
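
The aptos.util.timestamp.rs hunk is the same prost 0.13 regeneration applied to Timestamp: the new codegen emits a `// This file is @generated by prost-build.` marker and, because the message holds only scalar fields, derives `Copy`, so timestamps can be passed by value without moving them. A hedged sketch (import path and the standard seconds field assumed):

    use aptos_protos::util::timestamp::Timestamp;

    // Timestamp is Copy after the regeneration, so taking both arguments
    // by value leaves the caller's copies intact.
    fn latency_seconds(block_time: Timestamp, now: Timestamp) -> i64 {
        now.seconds - block_time.seconds
    }

The added TypeScript line simply re-exports the generated bindings for the new grpc.proto definitions alongside raw_data, so clients importing from index.aptos.indexer.v1 pick up the new service types without changing their imports.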