From db4a77e71482c7dfefb8ee6031404f8dbe8b4415 Mon Sep 17 00:00:00 2001 From: Guoteng Rao <3603304+grao1991@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:16:38 +0000 Subject: [PATCH] big change --- Cargo.lock | 106 +- Cargo.toml | 9 +- .../indexer-grpc-data-service/Cargo.toml | 6 + .../indexer-grpc-data-service/src/config.rs | 343 ++-- .../indexer-grpc-data-service/src/lib.rs | 3 + .../src/live_data_service.rs | 439 ++++++ .../indexer-grpc-data-service/src/metrics.rs | 162 -- .../src/old/config.rs | 244 +++ .../indexer-grpc-data-service/src/old/lib.rs | 8 + .../indexer-grpc-data-service/src/old/main.rs | 17 + .../src/old/metrics.rs | 164 ++ .../src/old/service.rs | 1319 ++++++++++++++++ .../indexer-grpc-data-service/src/service.rs | 1378 ++--------------- .../indexer-grpc-file-store/Cargo.toml | 10 + .../src/data_manager.rs | 287 ++++ .../src/file_store_uploader.rs | 127 ++ .../indexer-grpc-file-store/src/lib.rs | 402 ++++- .../indexer-grpc-file-store/src/main.rs | 4 +- .../src/metadata_manager.rs | 393 +++++ .../indexer-grpc-file-store/src/processor.rs | 304 ---- .../indexer-grpc-file-store/src/service.rs | 84 + .../src/fullnode_data_service.rs | 40 +- .../indexer-grpc-server-framework/src/lib.rs | 44 +- .../indexer-grpc-utils/Cargo.toml | 3 + .../indexer-grpc-utils/src/cache_operator.rs | 6 +- .../src/compression_util.rs | 8 +- .../indexer-grpc-utils/src/config.rs | 20 + .../src/file_store_operator_v2/gcs.rs | 89 ++ .../src/file_store_operator_v2/local.rs | 56 + .../src/file_store_operator_v2/mod.rs | 229 +++ .../indexer-grpc-utils/src/lib.rs | 16 +- .../indexer-grpc-utils/src/status_page/mod.rs | 80 + protos/proto/aptos/indexer/v1/grpc.proto | 73 + .../internal/fullnode/v1/fullnode_data.proto | 9 + .../internal/fullnode/v1/fullnode_data_pb2.py | 31 +- .../fullnode/v1/fullnode_data_pb2.pyi | 13 + .../fullnode/v1/fullnode_data_pb2_grpc.py | 45 + protos/rust/src/pb/aptos.indexer.v1.rs | 373 ++++- protos/rust/src/pb/aptos.indexer.v1.serde.rs | 1119 +++++++++++++ protos/rust/src/pb/aptos.indexer.v1.tonic.rs | 848 +++++++++- .../rust/src/pb/aptos.internal.fullnode.v1.rs | 417 ++--- .../pb/aptos.internal.fullnode.v1.serde.rs | 162 ++ .../pb/aptos.internal.fullnode.v1.tonic.rs | 170 +- .../rust/src/pb/aptos.remote_executor.v1.rs | 5 +- .../src/pb/aptos.remote_executor.v1.tonic.rs | 87 +- protos/rust/src/pb/aptos.transaction.v1.rs | 276 ++-- protos/rust/src/pb/aptos.util.timestamp.rs | 4 +- .../internal/fullnode/v1/fullnode_data.ts | 212 ++- .../typescript/src/index.aptos.indexer.v1.ts | 1 + 49 files changed, 7808 insertions(+), 2437 deletions(-) create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/live_data_service.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/config.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/lib.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/main.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/metrics.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/service.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-store/src/data_manager.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-store/src/file_store_uploader.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-store/src/metadata_manager.rs delete mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-store/src/service.rs create 
mode 100644 ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/gcs.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/local.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/mod.rs create mode 100644 ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/mod.rs create mode 100644 protos/proto/aptos/indexer/v1/grpc.proto diff --git a/Cargo.lock b/Cargo.lock index 3a5bc9ac2e05e..8eeb25c1dfd5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -364,7 +364,7 @@ dependencies = [ "thiserror", "tokio", "toml 0.7.8", - "tonic 0.11.0", + "tonic 0.12.3", "tracing", "tracing-subscriber 0.3.18", "url", @@ -2115,13 +2115,13 @@ dependencies = [ "futures-core", "jemallocator", "once_cell", - "prost 0.12.3", + "prost 0.13.3", "redis", "reqwest 0.11.23", "serde", "tempfile", "tokio", - "tonic 0.11.0", + "tonic 0.12.3", "tracing", "url", ] @@ -2138,20 +2138,26 @@ dependencies = [ "aptos-protos 1.3.1", "aptos-transaction-filter", "async-trait", + "build_html", "clap 4.5.21", + "dashmap", "futures", "jemallocator", + "lazy_static", "once_cell", - "prost 0.12.3", + "prost 0.13.3", + "rand 0.7.3", "redis", "serde", "serde_json", "tokio", + "tokio-scoped", "tokio-stream", - "tonic 0.11.0", + "tonic 0.12.3", "tonic-reflection", "tracing", "uuid", + "warp", ] [[package]] @@ -2182,15 +2188,25 @@ dependencies = [ "aptos-indexer-grpc-utils", "aptos-metrics-core", "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=4801acae7aea30d7e96bbfbe5ec5b04056dfa4cf)", + "aptos-protos 1.3.1", "async-trait", + "build_html", "clap 4.5.21", + "dashmap", "futures", "jemallocator", "once_cell", + "prost 0.13.3", + "rand 0.7.3", "redis", "serde", + "serde_json", "tokio", + "tokio-scoped", + "tokio-stream", + "tonic 0.12.3", "tracing", + "warp", ] [[package]] @@ -2208,7 +2224,7 @@ dependencies = [ "serde", "serde_json", "tokio", - "tonic 0.11.0", + "tonic 0.12.3", "tracing", "url", ] @@ -2264,7 +2280,7 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tonic 0.11.0", + "tonic 0.12.3", "tonic-reflection", ] @@ -2346,6 +2362,7 @@ dependencies = [ "async-trait", "backoff", "base64 0.13.1", + "build_html", "chrono", "cloud-storage", "dashmap", @@ -2354,17 +2371,19 @@ dependencies = [ "lz4", "once_cell", "prometheus", - "prost 0.12.3", + "prost 0.13.3", "redis", "redis-test", "ripemd", "serde", "serde_json", "tokio", + "tokio-stream", "tokio-util 0.7.10", - "tonic 0.11.0", + "tonic 0.12.3", "tracing", "url", + "warp", ] [[package]] @@ -2428,7 +2447,7 @@ dependencies = [ "tokio", "tokio-stream", "toml 0.7.8", - "tonic 0.11.0", + "tonic 0.12.3", "url", ] @@ -3440,9 +3459,9 @@ version = "1.3.1" dependencies = [ "futures-core", "pbjson", - "prost 0.12.3", + "prost 0.13.3", "serde", - "tonic 0.11.0", + "tonic 0.12.3", ] [[package]] @@ -3796,7 +3815,7 @@ dependencies = [ "serde", "thiserror", "tokio", - "tonic 0.11.0", + "tonic 0.12.3", "tonic-reflection", ] @@ -4264,7 +4283,7 @@ dependencies = [ "aptos-protos 1.3.1", "derive_builder", "lz4", - "prost 0.12.3", + "prost 0.13.3", "serde", "serde_json", "serde_yaml 0.8.26", @@ -5845,6 +5864,12 @@ dependencies = [ "serde", ] +[[package]] +name = "build_html" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225eb82ce9e70dcc0cfa6e404d0f353326b6e163bf500ec4711cec317d11935c" + [[package]] name = "bulletproofs" version = "4.0.0" @@ -8578,14 +8603,14 @@ dependencies = [ "hyper 1.4.1", "jsonwebtoken 9.3.0", 
"once_cell", - "prost 0.13.1", + "prost 0.13.3", "prost-types 0.13.1", "reqwest 0.12.5", "secret-vault-value", "serde", "serde_json", "tokio", - "tonic 0.12.1", + "tonic 0.12.3", "tower", "tower-layer", "tower-util", @@ -13782,12 +13807,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", - "prost-derive 0.13.1", + "prost-derive 0.13.3", ] [[package]] @@ -13818,9 +13843,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", "itertools 0.13.0", @@ -13853,7 +13878,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" dependencies = [ - "prost 0.13.1", + "prost 0.13.3", ] [[package]] @@ -14856,6 +14881,19 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.1", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "0.2.1" @@ -16718,9 +16756,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -16915,15 +16953,16 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", "axum 0.7.5", "base64 0.22.1", "bytes", + "flate2", "h2 0.4.5", "http 1.1.0", "http-body 1.0.0", @@ -16933,8 +16972,8 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project 1.1.3", - "prost 0.13.1", - "rustls-native-certs 0.7.0", + "prost 0.13.3", + "rustls-native-certs 0.8.0", "rustls-pemfile 2.1.1", "socket2 0.5.5", "tokio", @@ -16944,19 +16983,20 @@ dependencies = [ "tower-layer", "tower-service", "tracing", + "zstd", ] [[package]] name = "tonic-reflection" -version = "0.11.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7" +checksum = "878d81f52e7fcfd80026b7fdb6a9b578b3c3653ba987f87f0dce4b64043cba27" dependencies = [ - "prost 0.12.3", - "prost-types 0.12.3", + "prost 0.13.3", + "prost-types 0.13.1", "tokio", "tokio-stream", - "tonic 0.11.0", + "tonic 0.12.3", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 57eb07ee62bbf..e5d4fa8f51b07 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -517,6 +517,7 @@ 
blst = "0.3.11" # The __private_bench feature exposes the Fp12 type which we need to implement a multi-threaded multi-pairing. blstrs = { version = "0.7.1", features = ["serde", "__private_bench"] } bollard = "0.15" +build_html = "2.5.0" bulletproofs = { version = "4.0.0" } byteorder = "1.4.3" bytes = { version = "1.4.0", features = ["serde"] } @@ -709,8 +710,8 @@ prometheus-http-query = "0.5.2" prometheus-parse = "0.2.4" proptest = "1.4.0" proptest-derive = "0.4.0" -prost = { version = "0.12.3", features = ["no-recursion-limit"] } -prost-types = "0.12.3" +prost = { version = "0.13.3", features = ["no-recursion-limit"] } +prost-types = "0.13.3" quanta = "0.10.1" quick_cache = "0.5.1" quick-junit = "0.5.0" @@ -805,7 +806,7 @@ tokio-stream = { version = "0.1.14", features = ["fs"] } tokio-test = "0.4.1" tokio-util = { version = "0.7.2", features = ["compat", "codec"] } toml = "0.7.4" -tonic = { version = "0.11.0", features = [ +tonic = { version = "0.12.3", features = [ "tls-roots", "transport", "prost", @@ -813,7 +814,7 @@ tonic = { version = "0.11.0", features = [ "codegen", "zstd", ] } -tonic-reflection = "0.11.0" +tonic-reflection = "0.12.3" topological-sort = "0.2.2" triomphe = "0.1.9" tsify-next = "0.5.4" diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-data-service/Cargo.toml index ae70ba89314fe..a722de57a4523 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/Cargo.toml @@ -21,19 +21,25 @@ aptos-moving-average = { workspace = true } aptos-protos = { workspace = true } aptos-transaction-filter = { workspace = true } async-trait = { workspace = true } +build_html = { workspace = true } clap = { workspace = true } +dashmap = { workspace = true } futures = { workspace = true } +lazy_static = { workspace = true } once_cell = { workspace = true } prost = { workspace = true } +rand = { workspace = true } redis = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } +tokio-scoped = { workspace = true } tokio-stream = { workspace = true } tonic = { workspace = true } tonic-reflection = { workspace = true } tracing = { workspace = true } uuid = { workspace = true } +warp = { workspace = true } [target.'cfg(unix)'.dependencies] jemallocator = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs index c5f621fcde703..87953f8b3e934 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs @@ -1,12 +1,18 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::service::RawDataServerWrapper; +use crate::{ + connection_manager::ConnectionManager, + historical_data_service::HistoricalDataService, + live_data_service::LiveDataService, + service::{DataServiceWrapper, DataServiceWrapperWrapper}, +}; use anyhow::{bail, Result}; use aptos_indexer_grpc_server_framework::RunnableConfig; use aptos_indexer_grpc_utils::{ - compression_util::StorageFormat, config::IndexerGrpcFileStoreConfig, - in_memory_cache::InMemoryCacheConfig, types::RedisUrl, + config::IndexerGrpcFileStoreConfig, + in_memory_cache::InMemoryCacheConfig, + status_page::{render_status_page, Tab}, }; use aptos_protos::{ indexer::v1::FILE_DESCRIPTOR_SET as INDEXER_V1_FILE_DESCRIPTOR_SET, @@ -14,12 +20,27 @@ use aptos_protos::{ 
util::timestamp::FILE_DESCRIPTOR_SET as UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET, }; use aptos_transaction_filter::BooleanTransactionFilter; +use build_html::{ + Container, ContainerType, Html, HtmlChild, HtmlContainer, HtmlElement, HtmlPage, HtmlTag, + Table, TableCell, TableCellType, TableRow, +}; +use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; use std::{net::SocketAddr, sync::Arc}; +use tokio::task::JoinHandle; use tonic::{codec::CompressionEncoding, transport::Server}; +use tracing::info; +use warp::{ + reply::{html, Reply, Response}, + Rejection, +}; + +include!("html.rs"); pub const SERVER_NAME: &str = "idxdatasvc"; +pub(crate) const MAX_MESSAGE_SIZE: usize = 256 * (1 << 20); + // Default max response channel size. const DEFAULT_MAX_RESPONSE_CHANNEL_SIZE: usize = 3; @@ -29,6 +50,9 @@ const DEFAULT_MAX_RESPONSE_CHANNEL_SIZE: usize = 3; const HTTP2_PING_INTERVAL_DURATION: std::time::Duration = std::time::Duration::from_secs(60); const HTTP2_PING_TIMEOUT_DURATION: std::time::Duration = std::time::Duration::from_secs(10); +static LIVE_DATA_SERVICE: OnceCell<LiveDataService<'static>> = OnceCell::new(); +static HISTORICAL_DATA_SERVICE: OnceCell<HistoricalDataService> = OnceCell::new(); + #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct TlsConfig { @@ -45,9 +69,37 @@ pub struct NonTlsConfig { pub data_service_grpc_listen_address: SocketAddr, } +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct LiveDataServiceConfig { + pub enabled: bool, + #[serde(default = "LiveDataServiceConfig::default_num_slots")] + pub num_slots: usize, + #[serde(default = "LiveDataServiceConfig::default_size_limit_bytes")] + pub size_limit_bytes: usize, +} + +impl LiveDataServiceConfig { + fn default_num_slots() -> usize { + 200000000 + } + + fn default_size_limit_bytes() -> usize { + 10000000000 + } +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct HistoricalDataServiceConfig { + pub enabled: bool, + pub file_store_config: IndexerGrpcFileStoreConfig, +} + #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct IndexerGrpcDataServiceConfig { + pub chain_id: u64, /// If given, we will run a server that uses TLS. pub data_service_grpc_tls_config: Option<TlsConfig>, /// If given, we will run a server that does not use TLS. @@ -55,19 +107,6 @@ pub struct IndexerGrpcDataServiceConfig { /// The size of the response channel that response can be buffered. #[serde(default = "IndexerGrpcDataServiceConfig::default_data_service_response_channel_size")] pub data_service_response_channel_size: usize, - /// Deprecated: a list of auth tokens that are allowed to access the service. - #[serde(default)] - pub whitelisted_auth_tokens: Vec<String>, - /// Deprecated: if set, don't check for auth tokens. - #[serde(default)] - pub disable_auth_check: bool, - /// File store config. - pub file_store_config: IndexerGrpcFileStoreConfig, - /// Redis read replica address. - pub redis_read_replica_address: RedisUrl, - /// Support compressed cache data. - #[serde(default = "IndexerGrpcDataServiceConfig::default_enable_cache_compression")] - pub enable_cache_compression: bool, #[serde(default)] pub in_memory_cache_config: InMemoryCacheConfig, /// Any transaction that matches this filter will be stripped. This means we remove /// the payload, signature, events, and writesets from it before sending it /// downstream. This should only be used in an emergency situation, e.g. when txns /// related to a certain module are too large and are causing issues for the data /// service. Learn more here: /// /// https://www.notion.so/aptoslabs/Runbook-c006a37259394ac2ba904d6b54d180fa?pvs=4#171c210964ec42a89574fc80154f9e85 /// /// Generally you will want to start with this with an OR, and then list out /// separate filters that describe each type of txn we want to strip.
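The two `OnceCell` statics introduced above give the status page a way to reach services that are constructed later on the startup path: `get_or_init` builds the value exactly once, and `get` returns `None` until that has happened. A minimal, self-contained sketch of the pattern (toy `LiveService` type, not the real `LiveDataService`):

```rust
use once_cell::sync::OnceCell;

struct LiveService {
    chain_id: u64,
}

// Write-once global: initialized by the startup task, read by the status page.
static LIVE: OnceCell<LiveService> = OnceCell::new();

fn main() {
    // Before initialization the status page sees nothing to render.
    assert!(LIVE.get().is_none());
    // The first get_or_init constructs the value; later calls reuse it.
    let svc = LIVE.get_or_init(|| LiveService { chain_id: 1 });
    assert_eq!(svc.chain_id, 1);
    assert!(LIVE.get().is_some());
}
```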
#[serde(default = "IndexerGrpcDataServiceConfig::default_txns_to_strip_filter")] pub txns_to_strip_filter: BooleanTransactionFilter, + + pub live_data_service_config: LiveDataServiceConfig, + pub historical_data_service_config: HistoricalDataServiceConfig, + pub grpc_manager_addresses: Vec, + pub self_advertised_address: String, } impl IndexerGrpcDataServiceConfig { - pub fn new( - data_service_grpc_tls_config: Option, - data_service_grpc_non_tls_config: Option, - data_service_response_channel_size: Option, - disable_auth_check: bool, - file_store_config: IndexerGrpcFileStoreConfig, - redis_read_replica_address: RedisUrl, - enable_cache_compression: bool, - in_memory_cache_config: InMemoryCacheConfig, - txns_to_strip_filter: BooleanTransactionFilter, - ) -> Self { - Self { - data_service_grpc_tls_config, - data_service_grpc_non_tls_config, - data_service_response_channel_size: data_service_response_channel_size - .unwrap_or_else(Self::default_data_service_response_channel_size), - whitelisted_auth_tokens: vec![], - disable_auth_check, - file_store_config, - redis_read_replica_address, - enable_cache_compression, - in_memory_cache_config, - txns_to_strip_filter, - } - } - pub const fn default_data_service_response_channel_size() -> usize { DEFAULT_MAX_RESPONSE_CHANNEL_SIZE } - pub const fn default_enable_cache_compression() -> bool { - false - } - pub fn default_txns_to_strip_filter() -> BooleanTransactionFilter { // This filter matches no txns. BooleanTransactionFilter::new_or(vec![]) } + + async fn create_live_data_service( + &self, + tasks: &mut Vec>>, + ) -> Option { + if !self.live_data_service_config.enabled { + return None; + } + let connection_manager = Arc::new( + ConnectionManager::new( + self.grpc_manager_addresses.clone(), + self.self_advertised_address.clone(), + /*is_live_data_service=*/ true, + ) + .await, + ); + let (handler_tx, handler_rx) = tokio::sync::mpsc::channel(10); + let service = DataServiceWrapper::new( + connection_manager.clone(), + handler_tx, + self.data_service_response_channel_size, + ); + + let connection_manager_clone = connection_manager.clone(); + tasks.push(tokio::task::spawn(async move { + connection_manager_clone.start().await; + Ok(()) + })); + + let config = self.live_data_service_config.clone(); + tasks.push(tokio::task::spawn_blocking(move || { + LIVE_DATA_SERVICE + .get_or_init(|| LiveDataService::new(config, connection_manager)) + .run(handler_rx); + Ok(()) + })); + + Some(service) + } + + async fn create_historical_data_service( + &self, + tasks: &mut Vec>>, + ) -> Option { + if !self.historical_data_service_config.enabled { + return None; + } + let connection_manager = Arc::new( + ConnectionManager::new( + self.grpc_manager_addresses.clone(), + self.self_advertised_address.clone(), + /*is_live_data_service=*/ false, + ) + .await, + ); + let (handler_tx, handler_rx) = tokio::sync::mpsc::channel(10); + let service = DataServiceWrapper::new( + connection_manager.clone(), + handler_tx, + self.data_service_response_channel_size, + ); + + let connection_manager_clone = connection_manager.clone(); + tasks.push(tokio::task::spawn(async move { + connection_manager_clone.start().await; + Ok(()) + })); + + let chain_id = self.chain_id; + let config = self.historical_data_service_config.clone(); + tasks.push(tokio::task::spawn_blocking(move || { + HISTORICAL_DATA_SERVICE + .get_or_init(|| HistoricalDataService::new(chain_id, config, connection_manager)) + .run(handler_rx); + Ok(()) + })); + + Some(service) + } } #[async_trait::async_trait] @@ -147,57 
+240,42 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { .register_encoded_file_descriptor_set(INDEXER_V1_FILE_DESCRIPTOR_SET) .register_encoded_file_descriptor_set(TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET) .register_encoded_file_descriptor_set(UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET) - .build() + .build_v1() .map_err(|e| anyhow::anyhow!("Failed to build reflection service: {}", e))? .send_compressed(CompressionEncoding::Zstd) .accept_compressed(CompressionEncoding::Zstd) .accept_compressed(CompressionEncoding::Gzip); - let cache_storage_format: StorageFormat = if self.enable_cache_compression { - StorageFormat::Lz4CompressedProto - } else { - StorageFormat::Base64UncompressedProto - }; + let mut tasks = vec![]; - println!( - ">>>> Starting Redis connection: {:?}", - &self.redis_read_replica_address.0 - ); - let redis_conn = redis::Client::open(self.redis_read_replica_address.0.clone())? - .get_tokio_connection_manager() - .await?; - println!(">>>> Redis connection established"); - // InMemoryCache. - let in_memory_cache = - aptos_indexer_grpc_utils::in_memory_cache::InMemoryCache::new_with_redis_connection( - self.in_memory_cache_config.clone(), - redis_conn, - cache_storage_format, - ) - .await?; - println!(">>>> InMemoryCache established"); - // Add authentication interceptor. - let server = RawDataServerWrapper::new( - self.redis_read_replica_address.clone(), - self.file_store_config.clone(), - self.data_service_response_channel_size, - self.txns_to_strip_filter.clone(), - cache_storage_format, - Arc::new(in_memory_cache), - )?; - let svc = aptos_protos::indexer::v1::raw_data_server::RawDataServer::new(server) - .send_compressed(CompressionEncoding::Zstd) - .accept_compressed(CompressionEncoding::Zstd) - .accept_compressed(CompressionEncoding::Gzip); - println!(">>>> Starting gRPC server: {:?}", &svc); + let live_data_service = self.create_live_data_service(&mut tasks).await; + let historical_data_service = self.create_historical_data_service(&mut tasks).await; - let svc_clone = svc.clone(); + let wrapper = Arc::new(DataServiceWrapperWrapper::new( + live_data_service, + historical_data_service, + )); + let wrapper_service_raw = + aptos_protos::indexer::v1::raw_data_server::RawDataServer::from_arc(wrapper.clone()) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip) + .max_decoding_message_size(MAX_MESSAGE_SIZE) + .max_encoding_message_size(MAX_MESSAGE_SIZE); + let wrapper_service = + aptos_protos::indexer::v1::data_service_server::DataServiceServer::from_arc(wrapper) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip) + .max_decoding_message_size(MAX_MESSAGE_SIZE) + .max_encoding_message_size(MAX_MESSAGE_SIZE); + let wrapper_service_raw_clone = wrapper_service_raw.clone(); + let wrapper_service_clone = wrapper_service.clone(); let reflection_service_clone = reflection_service.clone(); - let mut tasks = vec![]; if let Some(config) = &self.data_service_grpc_non_tls_config { let listen_address = config.data_service_grpc_listen_address; - tracing::info!( + info!( grpc_address = listen_address.to_string().as_str(), "[data service] starting gRPC server with non-TLS." 
); @@ -205,7 +283,8 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { Server::builder() .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) - .add_service(svc_clone) + .add_service(wrapper_service_clone) + .add_service(wrapper_service_raw_clone) .add_service(reflection_service_clone) .serve(listen_address) .await @@ -217,7 +296,7 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { let cert = tokio::fs::read(config.cert_path.clone()).await?; let key = tokio::fs::read(config.key_path.clone()).await?; let identity = tonic::transport::Identity::from_pem(cert, key); - tracing::info!( + info!( grpc_address = listen_address.to_string().as_str(), "[Data Service] Starting gRPC server with TLS." ); @@ -226,7 +305,8 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) .tls_config(tonic::transport::ServerTlsConfig::new().identity(identity))? - .add_service(svc) + .add_service(wrapper_service) + .add_service(wrapper_service_raw) .add_service(reflection_service) .serve(listen_address) .await @@ -241,4 +321,89 @@ impl RunnableConfig for IndexerGrpcDataServiceConfig { fn get_server_name(&self) -> String { SERVER_NAME.to_string() } + + async fn status_page(&self) -> Result<Response, Rejection> { + let mut tabs = vec![]; + // TODO(grao): Add something real. + let overview_tab_content = HtmlElement::new(HtmlTag::Div).with_raw("Welcome!").into(); + tabs.push(Tab::new("Overview", overview_tab_content)); + if let Some(live_data_service) = LIVE_DATA_SERVICE.get() { + let connection_manager_info = + render_connection_manager_info(live_data_service.get_connection_manager()); + let cache_info = render_cache_info(); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(connection_manager_info) + .with_container(cache_info) + .into(); + tabs.push(Tab::new("LiveDataService", content)); + } + + if let Some(historical_data_service) = HISTORICAL_DATA_SERVICE.get() { + let connection_manager_info = + render_connection_manager_info(historical_data_service.get_connection_manager()); + let file_store_info = render_file_store_info(); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(connection_manager_info) + .with_container(file_store_info) + .into(); + tabs.push(Tab::new("HistoricalDataService", content)); + } + + render_status_page(tabs) + } +} + +fn render_connection_manager_info(connection_manager: &ConnectionManager) -> Container { + let known_latest_version = connection_manager.known_latest_version(); + let active_streams = connection_manager.get_active_streams(); + let active_streams_table = active_streams.into_iter().fold( + Table::new() + .with_attributes([("style", "width: 100%; border: 5px solid black;")]) + .with_thead_attributes([("style", "background-color: lightcoral; color: white;")]) + .with_custom_header_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Header).with_raw("Id")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("Current Version")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("End Version")), + ), + |table, active_stream| { + table.with_custom_body_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Data).with_raw(active_stream.id())) + .with_cell( + TableCell::new(TableCellType::Data) + .with_raw(active_stream.current_version()), + ) + .with_cell( + TableCell::new(TableCellType::Data).with_raw(active_stream.end_version()), 
), + ) + }, + ); + + Container::new(ContainerType::Section) + .with_paragraph_attr( + "Connection Manager", + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_paragraph(format!("Known latest version: {known_latest_version}.")) + .with_paragraph_attr( + "Active Streams", + [("style", "font-size: 16px; font-weight: bold;")], + ) + .with_table(active_streams_table) +} + +fn render_cache_info() -> Container { + Container::new(ContainerType::Section).with_paragraph_attr( + "In Memory Cache", + [("style", "font-size: 24px; font-weight: bold;")], + ) +} + +fn render_file_store_info() -> Container { + Container::new(ContainerType::Section).with_paragraph_attr( + "File Store", + [("style", "font-size: 24px; font-weight: bold;")], + ) } diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs index 566941502a239..54c857fa9748b 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs @@ -2,6 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 mod config; +mod connection_manager; +mod historical_data_service; +mod live_data_service; mod metrics; mod service; diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/live_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/live_data_service.rs new file mode 100644 index 0000000000000..806b453776c4c --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/live_data_service.rs @@ -0,0 +1,439 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{config::LiveDataServiceConfig, connection_manager::ConnectionManager}; +use aptos_protos::{ + indexer::v1::{GetTransactionsRequest, TransactionsResponse}, + transaction::v1::Transaction, +}; +use futures::future::{BoxFuture, FutureExt, Shared}; +use prost::Message; +use std::{sync::Arc, time::Duration}; +use tokio::sync::{ + mpsc::{Receiver, Sender}, + RwLock, +}; +use tonic::{Request, Status}; +use tracing::{info, trace}; +use uuid::Uuid; + +static MAX_BYTES_PER_BATCH: usize = 20 * (1 << 20); + +struct DataClient { + connection_manager: Arc<ConnectionManager>, +} + +impl DataClient { + fn new(connection_manager: Arc<ConnectionManager>) -> Self { + Self { connection_manager } + } + + async fn fetch_transactions(&self, starting_version: u64) -> Vec<Transaction> { + trace!("Fetching transactions from GrpcManager, start_version: {starting_version}."); + + let request = GetTransactionsRequest { + starting_version: Some(starting_version), + transactions_count: None, + batch_size: None, + }; + loop { + let mut client = self + .connection_manager + .get_grpc_manager_client_for_request(); + let response = client.get_transactions(request.clone()).await; + if let Ok(response) = response { + return response.into_inner().transactions; + } + // TODO(grao): Error handling. 
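`FetchManager`, defined just after this client, stores the in-flight `fetch_latest_data` call as a `Shared<BoxFuture<'_, usize>>`: every stream that catches up to the cache head clones and awaits the same future, so one fetch serves all waiters instead of each issuing its own request. A minimal sketch of that pattern (toy future in place of the real fetch; assumes a tokio runtime):

```rust
use futures::future::{BoxFuture, FutureExt, Shared};

#[tokio::main]
async fn main() {
    // Box the future, then make it cloneable; all clones resolve to the
    // same output (the output type must be Clone).
    let fetch: Shared<BoxFuture<'static, usize>> =
        async { 128usize }.boxed().shared();

    // Two concurrent waiters share one underlying computation.
    let (a, b) = tokio::join!(fetch.clone(), fetch);
    assert_eq!((a, b), (128, 128));
}
```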
+ } + } +} + +type FetchTask<'a> = Shared<BoxFuture<'a, usize>>; + +struct FetchManager<'a> { + data_manager: Arc<RwLock<DataManager>>, + data_client: Arc<DataClient>, + fetching_latest_data_task: RwLock<Option<FetchTask<'a>>>, +} + +impl<'a> FetchManager<'a> { + fn new( + data_manager: Arc<RwLock<DataManager>>, + connection_manager: Arc<ConnectionManager>, + ) -> Self { + Self { + data_manager, + data_client: Arc::new(DataClient::new(connection_manager)), + fetching_latest_data_task: RwLock::new(None), + } + } + + async fn fetch_past_data(&self, version: u64) -> usize { + Self::fetch_and_update_cache(self.data_client.clone(), self.data_manager.clone(), version) + .await + } + + async fn fetch_and_update_cache( + data_client: Arc<DataClient>, + data_manager: Arc<RwLock<DataManager>>, + version: u64, + ) -> usize { + let transactions = data_client.fetch_transactions(version).await; + let len = transactions.len(); + + if len > 0 { + data_manager + .write() + .await + .update_data(version, transactions); + } + + len + } + + async fn fetch_latest_data(&'a self) -> usize { + let version = self.data_manager.read().await.end_version; + info!("Fetching latest data starting from version {version}."); + loop { + let num_transactions = Self::fetch_and_update_cache( + self.data_client.clone(), + self.data_manager.clone(), + version, + ) + .await; + if num_transactions != 0 { + info!("Finished fetching latest data, got {num_transactions} transactions starting from version {version}."); + return num_transactions; + } + tokio::time::sleep(Duration::from_millis(200)).await; + } + } + + async fn continuously_fetch_latest_data(&'a self) { + loop { + let task = self.fetch_latest_data().boxed().shared(); + *self.fetching_latest_data_task.write().await = Some(task.clone()); + let _ = task.await; + } + } +} + +struct DataManager { + start_version: u64, + end_version: u64, + data: Vec<Option<Box<Transaction>>>, + + soft_limit_for_eviction: usize, + eviction_target: usize, + total_size: usize, + num_slots: usize, +} + +impl DataManager { + fn new(end_version: u64, num_slots: usize, size_limit_bytes: usize) -> Self { + Self { + start_version: end_version.saturating_sub(num_slots as u64), + end_version, + data: vec![None; num_slots], + soft_limit_for_eviction: size_limit_bytes, + eviction_target: size_limit_bytes, + total_size: 0, + num_slots, + } + } + + fn update_data(&mut self, start_version: u64, transactions: Vec<Transaction>) { + let end_version = start_version + transactions.len() as u64; + + trace!( + "Updating data for {} transactions in range [{start_version}, {end_version}).", + transactions.len(), + ); + if start_version > self.end_version { + // TODO(grao): unexpected + return; + } + + if end_version <= self.start_version { + // TODO(grao): Log and counter. 
+ return; + } + + let num_to_skip = self.start_version.saturating_sub(start_version); + let start_version = start_version.max(self.start_version); + + let mut size_increased = 0; + let mut size_decreased = 0; + + for (i, transaction) in transactions + .into_iter() + .enumerate() + .skip(num_to_skip as usize) + { + let version = start_version + i as u64; + let slot_index = version as usize % self.num_slots; + if let Some(transaction) = self.data[slot_index].take() { + size_decreased += transaction.encoded_len(); + } + size_increased += transaction.encoded_len(); + self.data[version as usize % self.num_slots] = Some(Box::new(transaction)); + } + + if end_version > self.end_version { + self.end_version = end_version; + if self.start_version + (self.num_slots as u64) < end_version { + self.start_version = end_version - self.num_slots as u64; + } + } + + self.total_size += size_increased; + self.total_size -= size_decreased; + + if self.total_size >= self.soft_limit_for_eviction { + while self.total_size >= self.eviction_target { + if let Some(transaction) = + self.data[self.start_version as usize % self.num_slots].take() + { + self.total_size -= transaction.encoded_len(); + drop(transaction); + } + self.start_version += 1; + } + } + } +} + +pub struct InMemoryCache<'a> { + data_manager: Arc<RwLock<DataManager>>, + fetch_manager: Arc<FetchManager<'a>>, +} + +impl<'a> InMemoryCache<'a> { + pub fn new( + connection_manager: Arc<ConnectionManager>, + known_latest_version: u64, + num_slots: usize, + size_limit_bytes: usize, + ) -> Self { + let data_manager = Arc::new(RwLock::new(DataManager::new( + known_latest_version + 1, + num_slots, + size_limit_bytes, + ))); + let fetch_manager = Arc::new(FetchManager::new(data_manager.clone(), connection_manager)); + Self { + data_manager, + fetch_manager, + } + } + + async fn get_data( + &'a self, + starting_version: u64, + ending_version: u64, + max_num_transactions_per_batch: usize, + max_bytes_per_batch: usize, + ) -> Option<Vec<Transaction>> { + while starting_version >= self.data_manager.read().await.end_version { + trace!("Reached head, wait..."); + let num_transactions = self + .fetch_manager + .fetching_latest_data_task + .read() + .await + .as_ref() + .unwrap() + .clone() + .await; + + trace!("Done waiting, got {num_transactions} transactions at head."); + } + + loop { + let data_manager = self.data_manager.read().await; + + trace!("Getting data from cache, requested_version: {starting_version}, oldest available version: {}.", data_manager.start_version); + if starting_version < data_manager.start_version { + return None; + } + + let start_index = starting_version as usize % data_manager.num_slots; + + if data_manager.data[start_index].is_none() { + drop(data_manager); + self.fetch_manager.fetch_past_data(starting_version).await; + continue; + } + + let mut total_bytes = 0; + let mut version = starting_version; + let ending_version = ending_version.min(data_manager.end_version); + + if let Some(_) = data_manager.data[version as usize % data_manager.num_slots].as_ref() { + let mut result = Vec::new(); + while version < ending_version + && total_bytes < max_bytes_per_batch + && result.len() < max_num_transactions_per_batch + { + if let Some(transaction) = + data_manager.data[version as usize % data_manager.num_slots].as_ref() + { + total_bytes += transaction.encoded_len(); + result.push(transaction.as_ref().clone()); + version += 1; + } else { + break; + } + } + trace!("Data was sent from cache, last version: {}.", version - 1); + return Some(result); + } else { + unreachable!("Data cannot be None."); + } + } + } +} + +pub 
struct LiveDataService<'a> { + in_memory_cache: InMemoryCache<'a>, + connection_manager: Arc<ConnectionManager>, +} + +impl<'a> LiveDataService<'a> { + pub fn new(config: LiveDataServiceConfig, connection_manager: Arc<ConnectionManager>) -> Self { + let known_latest_version = connection_manager.known_latest_version(); + Self { + connection_manager: connection_manager.clone(), + in_memory_cache: InMemoryCache::new( + connection_manager, + known_latest_version, + config.num_slots, + config.size_limit_bytes, + ), + } + } + + pub fn run( + &'a self, + mut handler_rx: Receiver<( + Request<GetTransactionsRequest>, + Sender<Result<TransactionsResponse, Status>>, + )>, + ) { + info!("Running LiveDataService..."); + tokio_scoped::scope(|scope| { + scope.spawn(async move { + let _ = self + .in_memory_cache + .fetch_manager + .continuously_fetch_latest_data() + .await; + }); + while let Some((request, response_sender)) = handler_rx.blocking_recv() { + // TODO(grao): Store request metadata. + let request = request.into_inner(); + let id = Uuid::new_v4().to_string(); + let known_latest_version = self.get_known_latest_version(); + let starting_version = request.starting_version.unwrap_or(known_latest_version); + + info!("Received request: {request:?}."); + if starting_version > known_latest_version + 10000 { + let err = Err(Status::failed_precondition( + "starting_version cannot be set to a far future version.", + )); + info!("Client error: {err:?}."); + let _ = response_sender.blocking_send(err); + continue; + } + + let max_num_transactions_per_batch = if let Some(batch_size) = request.batch_size { + batch_size as usize + } else { + 10000 + }; + + let ending_version = request + .transactions_count + .map(|count| starting_version + count); + + scope.spawn(async move { + self.start_streaming( + id, + starting_version, + ending_version, + max_num_transactions_per_batch, + MAX_BYTES_PER_BATCH, + response_sender, + ) + .await + }); + } + }); + } + + pub(crate) fn get_connection_manager(&self) -> &ConnectionManager { + &self.connection_manager + } + + async fn start_streaming( + &'a self, + id: String, + starting_version: u64, + ending_version: Option<u64>, + max_num_transactions_per_batch: usize, + max_bytes_per_batch: usize, + response_sender: tokio::sync::mpsc::Sender<Result<TransactionsResponse, Status>>, + ) { + info!("Start streaming, starting_version: {starting_version}, ending_version: {ending_version:?}."); + self.connection_manager + .insert_active_stream(&id, starting_version, ending_version); + let mut next_version = starting_version; + let ending_version = ending_version.unwrap_or(u64::MAX); + loop { + if next_version >= ending_version { + self.connection_manager.remove_active_stream(&id); + break; + } + self.connection_manager + .update_stream_progress(&id, next_version); + let known_latest_version = self.get_known_latest_version(); + if next_version > known_latest_version { + info!("next_version {next_version} is larger than known_latest_version {known_latest_version}"); + tokio::time::sleep(Duration::from_millis(100)).await; + continue; + } + + if let Some(transactions) = self + .in_memory_cache + .get_data( + next_version, + ending_version, + max_num_transactions_per_batch, + max_bytes_per_batch, + ) + .await + { + next_version += transactions.len() as u64; + let response = TransactionsResponse { + transactions, + // TODO(grao): Fix chain id. 
+ chain_id: Some(0), + }; + if let Err(_) = response_sender.send(Ok(response)).await { + info!("Client dropped."); + break; + } + } else { + let err = Err(Status::not_found("Requested data is too old.")); + info!("Client error: {err:?}."); + let _ = response_sender.send(err).await; + break; + } + } + } + + fn get_known_latest_version(&self) -> u64 { + self.connection_manager.known_latest_version() + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs index 4813efda1aed9..7f1658608ae14 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/metrics.rs @@ -1,164 +1,2 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 - -use aptos_metrics_core::{ - register_gauge_vec, register_int_counter_vec, register_int_gauge_vec, GaugeVec, IntCounterVec, - IntGaugeVec, -}; -use once_cell::sync::Lazy; - -// The `identifier` label at the time of writing (2024-04-08) is always the -// application ID, a globally unique ID. - -/// Latest processed transaction version. -pub static LATEST_PROCESSED_VERSION_PER_PROCESSOR: Lazy<IntGaugeVec> = Lazy::new(|| { - register_int_gauge_vec!( - "indexer_grpc_data_service_with_user_latest_processed_version", - "Latest processed transaction version", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Number of transactions that are served by the data service. -pub static PROCESSED_VERSIONS_COUNT_PER_PROCESSOR: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_with_user_processed_versions", - "Number of transactions that have been processed by data service", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Number of errors that data service has encountered. -pub static ERROR_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_error", - "Number of errors that data service has encountered", - &["error_type"] - ) - .unwrap() -}); - -/// Data latency for the data service, based on the latest transaction processed by the selected processor. -pub static PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR: Lazy<GaugeVec> = Lazy::new(|| { - register_gauge_vec!( - "indexer_grpc_data_service_with_user_latest_data_latency_in_secs", - "Latency of data service based on latest processed transaction", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Count of connections that data service has established. -pub static CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_connection_count_v2", - "Count of connections that data service has established", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Count of the short connections; i.e., < 10 seconds. -pub static SHORT_CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_short_connection_by_user_processor_count", - "Count of the short connections; i.e., < 10 seconds", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Count of bytes transferred to the client. This only represents the bytes prepared and -/// ready to send to the client.
It does not represent the bytes actually -/// sent to the client. -/// -/// This is pre stripping, so it may include bytes for transactions that were later -/// stripped. See BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING for post -/// stripping. -pub static BYTES_READY_TO_TRANSFER_FROM_SERVER: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_bytes_ready_to_transfer_from_server", - "Count of bytes ready to transfer to the client (pre stripping)", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); - -/// Count of bytes transferred to the client. This only represents the bytes prepared and -/// ready to send to the client. It does not represent the bytes actually -/// sent to the client. -/// -/// This is post stripping, meaning some transactions may have been stripped (removing -/// things such as events, writesets, payload, signature). Compare this with -/// BYTES_READY_TO_TRANSFER_FROM_SERVER to see how many bytes were stripped. -pub static BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING: Lazy<IntCounterVec> = - Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_bytes_ready_to_transfer_from_server_after_stripping", - "Count of bytes ready to transfer to the client (post stripping)", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() - }); - -/// The number of transactions that had data (such as events, writesets, payload, -/// signature) stripped from them due to the `txns_to_strip_filter`. See -/// `strip_transactions` for more. -pub static NUM_TRANSACTIONS_STRIPPED: Lazy<IntCounterVec> = Lazy::new(|| { - register_int_counter_vec!( - "indexer_grpc_data_service_num_transactions_stripped", - "Number of transactions that had data (such as events, writesets, payload, signature) stripped from them", - &[ - "identifier_type", - "identifier", - "email", - "application_name", - "processor" - ], - ) - .unwrap() -}); diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/config.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/config.rs new file mode 100644 index 0000000000000..c5f621fcde703 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/config.rs @@ -0,0 +1,244 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::service::RawDataServerWrapper; +use anyhow::{bail, Result}; +use aptos_indexer_grpc_server_framework::RunnableConfig; +use aptos_indexer_grpc_utils::{ + compression_util::StorageFormat, config::IndexerGrpcFileStoreConfig, + in_memory_cache::InMemoryCacheConfig, types::RedisUrl, +}; +use aptos_protos::{ + indexer::v1::FILE_DESCRIPTOR_SET as INDEXER_V1_FILE_DESCRIPTOR_SET, + transaction::v1::FILE_DESCRIPTOR_SET as TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET, + util::timestamp::FILE_DESCRIPTOR_SET as UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET, +}; +use aptos_transaction_filter::BooleanTransactionFilter; +use serde::{Deserialize, Serialize}; +use std::{net::SocketAddr, sync::Arc}; +use tonic::{codec::CompressionEncoding, transport::Server}; + +pub const SERVER_NAME: &str = "idxdatasvc"; + +// Default max response channel size. +const DEFAULT_MAX_RESPONSE_CHANNEL_SIZE: usize = 3; + +// HTTP2 ping interval and timeout. +// This can help the server to garbage collect dead connections.
+// tonic server: https://docs.rs/tonic/latest/tonic/transport/server/struct.Server.html#method.http2_keepalive_interval +const HTTP2_PING_INTERVAL_DURATION: std::time::Duration = std::time::Duration::from_secs(60); +const HTTP2_PING_TIMEOUT_DURATION: std::time::Duration = std::time::Duration::from_secs(10); + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct TlsConfig { + /// The address for the TLS GRPC server to listen on. + pub data_service_grpc_listen_address: SocketAddr, + pub cert_path: String, + pub key_path: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct NonTlsConfig { + /// The address for the non-TLS GRPC server to listen on. + pub data_service_grpc_listen_address: SocketAddr, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct IndexerGrpcDataServiceConfig { + /// If given, we will run a server that uses TLS. + pub data_service_grpc_tls_config: Option<TlsConfig>, + /// If given, we will run a server that does not use TLS. + pub data_service_grpc_non_tls_config: Option<NonTlsConfig>, + /// The size of the response channel that responses can be buffered in. + #[serde(default = "IndexerGrpcDataServiceConfig::default_data_service_response_channel_size")] + pub data_service_response_channel_size: usize, + /// Deprecated: a list of auth tokens that are allowed to access the service. + #[serde(default)] + pub whitelisted_auth_tokens: Vec<String>, + /// Deprecated: if set, don't check for auth tokens. + #[serde(default)] + pub disable_auth_check: bool, + /// File store config. + pub file_store_config: IndexerGrpcFileStoreConfig, + /// Redis read replica address. + pub redis_read_replica_address: RedisUrl, + /// Support compressed cache data. + #[serde(default = "IndexerGrpcDataServiceConfig::default_enable_cache_compression")] + pub enable_cache_compression: bool, + #[serde(default)] + pub in_memory_cache_config: InMemoryCacheConfig, + /// Any transaction that matches this filter will be stripped. This means we remove + /// the payload, signature, events, and writesets from it before sending it + /// downstream. This should only be used in an emergency situation, e.g. when txns + /// related to a certain module are too large and are causing issues for the data + /// service. Learn more here: + /// + /// https://www.notion.so/aptoslabs/Runbook-c006a37259394ac2ba904d6b54d180fa?pvs=4#171c210964ec42a89574fc80154f9e85 + /// + /// Generally you will want to start with this with an OR, and then list out + /// separate filters that describe each type of txn we want to strip.
+ #[serde(default = "IndexerGrpcDataServiceConfig::default_txns_to_strip_filter")] + pub txns_to_strip_filter: BooleanTransactionFilter, +} + +impl IndexerGrpcDataServiceConfig { + pub fn new( + data_service_grpc_tls_config: Option, + data_service_grpc_non_tls_config: Option, + data_service_response_channel_size: Option, + disable_auth_check: bool, + file_store_config: IndexerGrpcFileStoreConfig, + redis_read_replica_address: RedisUrl, + enable_cache_compression: bool, + in_memory_cache_config: InMemoryCacheConfig, + txns_to_strip_filter: BooleanTransactionFilter, + ) -> Self { + Self { + data_service_grpc_tls_config, + data_service_grpc_non_tls_config, + data_service_response_channel_size: data_service_response_channel_size + .unwrap_or_else(Self::default_data_service_response_channel_size), + whitelisted_auth_tokens: vec![], + disable_auth_check, + file_store_config, + redis_read_replica_address, + enable_cache_compression, + in_memory_cache_config, + txns_to_strip_filter, + } + } + + pub const fn default_data_service_response_channel_size() -> usize { + DEFAULT_MAX_RESPONSE_CHANNEL_SIZE + } + + pub const fn default_enable_cache_compression() -> bool { + false + } + + pub fn default_txns_to_strip_filter() -> BooleanTransactionFilter { + // This filter matches no txns. + BooleanTransactionFilter::new_or(vec![]) + } +} + +#[async_trait::async_trait] +impl RunnableConfig for IndexerGrpcDataServiceConfig { + fn validate(&self) -> Result<()> { + if self.data_service_grpc_non_tls_config.is_none() + && self.data_service_grpc_tls_config.is_none() + { + bail!("At least one of data_service_grpc_non_tls_config and data_service_grpc_tls_config must be set"); + } + self.in_memory_cache_config.validate()?; + Ok(()) + } + + async fn run(&self) -> Result<()> { + let reflection_service = tonic_reflection::server::Builder::configure() + // Note: It is critical that the file descriptor set is registered for every + // file that the top level API proto depends on recursively. If you don't, + // compilation will still succeed but reflection will fail at runtime. + // + // TODO: Add a test for this / something in build.rs, this is a big footgun. + .register_encoded_file_descriptor_set(INDEXER_V1_FILE_DESCRIPTOR_SET) + .register_encoded_file_descriptor_set(TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET) + .register_encoded_file_descriptor_set(UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET) + .build() + .map_err(|e| anyhow::anyhow!("Failed to build reflection service: {}", e))? + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip); + + let cache_storage_format: StorageFormat = if self.enable_cache_compression { + StorageFormat::Lz4CompressedProto + } else { + StorageFormat::Base64UncompressedProto + }; + + println!( + ">>>> Starting Redis connection: {:?}", + &self.redis_read_replica_address.0 + ); + let redis_conn = redis::Client::open(self.redis_read_replica_address.0.clone())? + .get_tokio_connection_manager() + .await?; + println!(">>>> Redis connection established"); + // InMemoryCache. + let in_memory_cache = + aptos_indexer_grpc_utils::in_memory_cache::InMemoryCache::new_with_redis_connection( + self.in_memory_cache_config.clone(), + redis_conn, + cache_storage_format, + ) + .await?; + println!(">>>> InMemoryCache established"); + // Add authentication interceptor. 
+ let server = RawDataServerWrapper::new( + self.redis_read_replica_address.clone(), + self.file_store_config.clone(), + self.data_service_response_channel_size, + self.txns_to_strip_filter.clone(), + cache_storage_format, + Arc::new(in_memory_cache), + )?; + let svc = aptos_protos::indexer::v1::raw_data_server::RawDataServer::new(server) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Gzip); + println!(">>>> Starting gRPC server: {:?}", &svc); + + let svc_clone = svc.clone(); + let reflection_service_clone = reflection_service.clone(); + + let mut tasks = vec![]; + if let Some(config) = &self.data_service_grpc_non_tls_config { + let listen_address = config.data_service_grpc_listen_address; + tracing::info!( + grpc_address = listen_address.to_string().as_str(), + "[data service] starting gRPC server with non-TLS." + ); + tasks.push(tokio::spawn(async move { + Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .add_service(svc_clone) + .add_service(reflection_service_clone) + .serve(listen_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + if let Some(config) = &self.data_service_grpc_tls_config { + let listen_address = config.data_service_grpc_listen_address; + let cert = tokio::fs::read(config.cert_path.clone()).await?; + let key = tokio::fs::read(config.key_path.clone()).await?; + let identity = tonic::transport::Identity::from_pem(cert, key); + tracing::info!( + grpc_address = listen_address.to_string().as_str(), + "[Data Service] Starting gRPC server with TLS." + ); + tasks.push(tokio::spawn(async move { + Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .tls_config(tonic::transport::ServerTlsConfig::new().identity(identity))? 
+ .add_service(svc) + .add_service(reflection_service) + .serve(listen_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + + futures::future::try_join_all(tasks).await?; + Ok(()) + } + + fn get_server_name(&self) -> String { + SERVER_NAME.to_string() + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/lib.rs new file mode 100644 index 0000000000000..566941502a239 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/lib.rs @@ -0,0 +1,8 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +mod config; +mod metrics; +mod service; + +pub use config::{IndexerGrpcDataServiceConfig, NonTlsConfig, SERVER_NAME}; diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/main.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/main.rs new file mode 100644 index 0000000000000..265054ba3cddd --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/main.rs @@ -0,0 +1,17 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::Result; +use aptos_indexer_grpc_data_service::IndexerGrpcDataServiceConfig; +use aptos_indexer_grpc_server_framework::ServerArgs; +use clap::Parser; + +#[cfg(unix)] +#[global_allocator] +static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + +#[tokio::main] +async fn main() -> Result<()> { + let args = ServerArgs::parse(); + args.run::<IndexerGrpcDataServiceConfig>().await +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/metrics.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/metrics.rs new file mode 100644 index 0000000000000..4813efda1aed9 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/metrics.rs @@ -0,0 +1,164 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_metrics_core::{ + register_gauge_vec, register_int_counter_vec, register_int_gauge_vec, GaugeVec, IntCounterVec, + IntGaugeVec, +}; +use once_cell::sync::Lazy; + +// The `identifier` label at the time of writing (2024-04-08) is always the +// application ID, a globally unique ID. + +/// Latest processed transaction version. +pub static LATEST_PROCESSED_VERSION_PER_PROCESSOR: Lazy<IntGaugeVec> = Lazy::new(|| { + register_int_gauge_vec!( + "indexer_grpc_data_service_with_user_latest_processed_version", + "Latest processed transaction version", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +}); + +/// Number of transactions that are served by the data service. +pub static PROCESSED_VERSIONS_COUNT_PER_PROCESSOR: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_with_user_processed_versions", + "Number of transactions that have been processed by data service", + &[ + "identifier_type", + "identifier", + "email", + "application_name", + "processor" + ], + ) + .unwrap() +}); + +/// Number of errors that data service has encountered. +pub static ERROR_COUNT: Lazy<IntCounterVec> = Lazy::new(|| { + register_int_counter_vec!( + "indexer_grpc_data_service_error", + "Number of errors that data service has encountered", + &["error_type"] + ) + .unwrap() +}); + +/// Data latency for the data service, based on the latest transaction processed by the selected processor. 
+pub static PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR: Lazy<GaugeVec> = Lazy::new(|| {
+    register_gauge_vec!(
+        "indexer_grpc_data_service_with_user_latest_data_latency_in_secs",
+        "Latency of data service based on latest processed transaction",
+        &[
+            "identifier_type",
+            "identifier",
+            "email",
+            "application_name",
+            "processor"
+        ],
+    )
+    .unwrap()
+});
+
+/// Count of connections that the data service has established.
+pub static CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_grpc_data_service_connection_count_v2",
+        "Count of connections that data service has established",
+        &[
+            "identifier_type",
+            "identifier",
+            "email",
+            "application_name",
+            "processor"
+        ],
+    )
+    .unwrap()
+});
+
+/// Count of the short connections; i.e., < 10 seconds.
+pub static SHORT_CONNECTION_COUNT: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_grpc_data_service_short_connection_by_user_processor_count",
+        "Count of the short connections; i.e., < 10 seconds",
+        &[
+            "identifier_type",
+            "identifier",
+            "email",
+            "application_name",
+            "processor"
+        ],
+    )
+    .unwrap()
+});
+
+/// Count of bytes transferred to the client. This only represents the bytes prepared
+/// and ready to send to the client; it does not represent the bytes actually sent to
+/// the client.
+///
+/// This is pre stripping, so it may include bytes for transactions that were later
+/// stripped. See BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING for post
+/// stripping.
+pub static BYTES_READY_TO_TRANSFER_FROM_SERVER: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_grpc_data_service_bytes_ready_to_transfer_from_server",
+        "Count of bytes ready to transfer to the client (pre stripping)",
+        &[
+            "identifier_type",
+            "identifier",
+            "email",
+            "application_name",
+            "processor"
+        ],
+    )
+    .unwrap()
+});
+
+/// Count of bytes transferred to the client. This only represents the bytes prepared
+/// and ready to send to the client; it does not represent the bytes actually sent to
+/// the client.
+///
+/// This is post stripping, meaning some transactions may have been stripped (removing
+/// things such as events, writesets, payload, signature). Compare this with
+/// BYTES_READY_TO_TRANSFER_FROM_SERVER to see how many bytes were stripped.
+pub static BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING: Lazy<IntCounterVec> =
+    Lazy::new(|| {
+        register_int_counter_vec!(
+            "indexer_grpc_data_service_bytes_ready_to_transfer_from_server_after_stripping",
+            "Count of bytes ready to transfer to the client (post stripping)",
+            &[
+                "identifier_type",
+                "identifier",
+                "email",
+                "application_name",
+                "processor"
+            ],
+        )
+        .unwrap()
+    });
+
+/// The number of transactions that had data (such as events, writesets, payload,
+/// signature) stripped from them due to the `txns_to_strip_filter`. See
+/// `strip_transactions` for more.
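+///
+/// For example (hypothetical filter): if `txns_to_strip_filter` matches transactions
+/// from sender `0xdead`, a matching transaction is still streamed to the client, but
+/// with empty `events` and `changes` and with `payload`/`signature` set to `None`,
+/// and this counter is incremented once for it.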
+pub static NUM_TRANSACTIONS_STRIPPED: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "indexer_grpc_data_service_num_transactions_stripped",
+        "Number of transactions that had data (such as events, writesets, payload, signature) stripped from them",
+        &[
+            "identifier_type",
+            "identifier",
+            "email",
+            "application_name",
+            "processor"
+        ],
+    )
+    .unwrap()
+});
diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/service.rs
new file mode 100644
index 0000000000000..f2faf42408631
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/old/service.rs
@@ -0,0 +1,1319 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::metrics::{
+    BYTES_READY_TO_TRANSFER_FROM_SERVER, BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING,
+    CONNECTION_COUNT, ERROR_COUNT, LATEST_PROCESSED_VERSION_PER_PROCESSOR,
+    NUM_TRANSACTIONS_STRIPPED, PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR,
+    PROCESSED_VERSIONS_COUNT_PER_PROCESSOR, SHORT_CONNECTION_COUNT,
+};
+use anyhow::{Context, Result};
+use aptos_indexer_grpc_utils::{
+    cache_operator::{CacheBatchGetStatus, CacheCoverageStatus, CacheOperator},
+    chunk_transactions,
+    compression_util::{CacheEntry, StorageFormat},
+    config::IndexerGrpcFileStoreConfig,
+    constants::{
+        IndexerGrpcRequestMetadata, GRPC_AUTH_TOKEN_HEADER, GRPC_REQUEST_NAME_HEADER,
+        MESSAGE_SIZE_LIMIT, REQUEST_HEADER_APTOS_APPLICATION_NAME, REQUEST_HEADER_APTOS_EMAIL,
+        REQUEST_HEADER_APTOS_IDENTIFIER, REQUEST_HEADER_APTOS_IDENTIFIER_TYPE,
+    },
+    counters::{log_grpc_step, IndexerGrpcStep, NUM_MULTI_FETCH_OVERLAPPED_VERSIONS},
+    file_store_operator::FileStoreOperator,
+    in_memory_cache::InMemoryCache,
+    time_diff_since_pb_timestamp_in_secs,
+    types::RedisUrl,
+};
+use aptos_moving_average::MovingAverage;
+use aptos_protos::{
+    indexer::v1::{raw_data_server::RawData, GetTransactionsRequest, TransactionsResponse},
+    transaction::v1::{transaction::TxnData, Transaction},
+};
+use aptos_transaction_filter::{BooleanTransactionFilter, Filterable};
+use futures::Stream;
+use prost::Message;
+use redis::Client;
+use std::{
+    collections::HashMap,
+    pin::Pin,
+    str::FromStr,
+    sync::Arc,
+    time::{Duration, Instant},
+};
+use tokio::sync::mpsc::{channel, error::SendTimeoutError};
+use tokio_stream::wrappers::ReceiverStream;
+use tonic::{Request, Response, Status};
+use tracing::{error, info, warn};
+use uuid::Uuid;
+
+type ResponseStream = Pin<Box<dyn Stream<Item = Result<TransactionsResponse, Status>> + Send>>;
+
+const MOVING_AVERAGE_WINDOW_SIZE: u64 = 10_000;
+// When trying to fetch beyond the current head of cache, the server will retry after this duration.
+const AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS: u64 = 50;
+// When an error occurs while fetching data from cache or file store, the server will retry after this duration.
+// TODO(larry): fix all errors treated as transient errors.
+const TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS: u64 = 1000;
+// This is the time we wait for the file store to be ready. It should only be
+// kicked off when there's no metadata in the file store.
+const FILE_STORE_METADATA_WAIT_MS: u64 = 2000;
+
+// The server retries sending the response to the client and gives up after RESPONSE_CHANNEL_SEND_TIMEOUT.
+// This is to prevent the server from being occupied by a slow client.
+const RESPONSE_CHANNEL_SEND_TIMEOUT: Duration = Duration::from_secs(120);
+
+const SHORT_CONNECTION_DURATION_IN_SECS: u64 = 10;
+
+const RESPONSE_HEADER_APTOS_CONNECTION_ID_HEADER: &str = "x-aptos-connection-id";
+const SERVICE_TYPE: &str = "data_service";
+
+// Number of times to retry fetching a given txn block from the stores
+pub const NUM_DATA_FETCH_RETRIES: u8 = 5;
+
+// Max number of tasks to reach out to TXN stores with
+const MAX_FETCH_TASKS_PER_REQUEST: u64 = 5;
+// The number of transactions we store per txn block; this is used to determine max num of tasks
+const TRANSACTIONS_PER_STORAGE_BLOCK: u64 = 1000;
+
+pub struct RawDataServerWrapper {
+    pub redis_client: Arc<redis::Client>,
+    pub file_store_config: IndexerGrpcFileStoreConfig,
+    pub data_service_response_channel_size: usize,
+    pub txns_to_strip_filter: BooleanTransactionFilter,
+    pub cache_storage_format: StorageFormat,
+    in_memory_cache: Arc<InMemoryCache>,
+}
+
+// Excludes in_memory_cache, which does not implement Debug.
+impl std::fmt::Debug for RawDataServerWrapper {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RawDataServerWrapper")
+            .field("redis_client", &"Arc<redis::Client>")
+            .field("file_store_config", &self.file_store_config)
+            .field(
+                "data_service_response_channel_size",
+                &self.data_service_response_channel_size,
+            )
+            .field("txns_to_strip_filter", &self.txns_to_strip_filter)
+            .field("cache_storage_format", &self.cache_storage_format)
+            .finish()
+    }
+}
+
+impl RawDataServerWrapper {
+    pub fn new(
+        redis_address: RedisUrl,
+        file_store_config: IndexerGrpcFileStoreConfig,
+        data_service_response_channel_size: usize,
+        txns_to_strip_filter: BooleanTransactionFilter,
+        cache_storage_format: StorageFormat,
+        in_memory_cache: Arc<InMemoryCache>,
+    ) -> anyhow::Result<Self> {
+        Ok(Self {
+            redis_client: Arc::new(
+                redis::Client::open(redis_address.0.clone()).with_context(|| {
+                    format!("Failed to create redis client for {}", redis_address)
+                })?,
+            ),
+            file_store_config,
+            data_service_response_channel_size,
+            txns_to_strip_filter,
+            cache_storage_format,
+            in_memory_cache,
+        })
+    }
+}
+
+/// Enum to represent the status of the data fetching overall.
+enum TransactionsDataStatus {
+    // Data fetching is successful.
+    Success(Vec<Transaction>),
+    // Ahead of current head of cache.
+    AheadOfCache,
+}
+
+/// RawDataServerWrapper handles the get transactions requests from cache and file store.
+#[tonic::async_trait]
+impl RawData for RawDataServerWrapper {
+    type GetTransactionsStream = ResponseStream;
+
+    /// GetTransactionsStream is a streaming GRPC endpoint:
+    /// 1. Fetches data from cache and file store.
+    /// 1.1. If the data is beyond the current head of cache, retry after a short sleep.
+    /// 1.2. If the data is not in cache, fetch the data from file store.
+    /// 1.3. If the data is not in file store, the stream connection will break.
+    /// 1.4. If an error happens, retry after a short sleep.
+    /// 2. Push data into channel to stream to the client.
+    /// 2.1. If the channel is full, do not fetch and retry after a short sleep.
+    async fn get_transactions(
+        &self,
+        req: Request<GetTransactionsRequest>,
+    ) -> Result<Response<Self::GetTransactionsStream>, Status> {
+        // Get request identity. The request is already authenticated by the interceptor.
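+        // The identity fields are read from gRPC request headers (the exact header
+        // names are the REQUEST_HEADER_APTOS_* constants imported above), and a fresh
+        // UUID is generated as the connection id. See `get_request_metadata` below.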
+ let request_metadata = match get_request_metadata(&req) { + Ok(request_metadata) => request_metadata, + _ => return Result::Err(Status::aborted("Invalid request token")), + }; + CONNECTION_COUNT + .with_label_values(&request_metadata.get_label_values()) + .inc(); + let request = req.into_inner(); + + let transactions_count = request.transactions_count; + + // Response channel to stream the data to the client. + let (tx, rx) = channel(self.data_service_response_channel_size); + let current_version = match &request.starting_version { + Some(version) => *version, + // Live mode if starting version isn't specified + None => self + .in_memory_cache + .latest_version() + .await + .saturating_sub(1), + }; + + let file_store_operator: Box = self.file_store_config.create(); + let file_store_operator = Arc::new(file_store_operator); + + // Adds tracing context for the request. + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceNewRequestReceived, + Some(current_version as i64), + transactions_count.map(|v| (v as i64 + current_version as i64 - 1)), + None, + None, + None, + None, + None, + Some(&request_metadata), + ); + + let redis_client = self.redis_client.clone(); + let cache_storage_format = self.cache_storage_format; + let request_metadata = Arc::new(request_metadata); + let txns_to_strip_filter = self.txns_to_strip_filter.clone(); + let in_memory_cache = self.in_memory_cache.clone(); + tokio::spawn({ + let request_metadata = request_metadata.clone(); + async move { + data_fetcher_task( + redis_client, + file_store_operator, + cache_storage_format, + request_metadata, + transactions_count, + tx, + txns_to_strip_filter, + current_version, + in_memory_cache, + ) + .await; + } + }); + + let output_stream = ReceiverStream::new(rx); + let mut response = Response::new(Box::pin(output_stream) as Self::GetTransactionsStream); + + response.metadata_mut().insert( + RESPONSE_HEADER_APTOS_CONNECTION_ID_HEADER, + tonic::metadata::MetadataValue::from_str(&request_metadata.request_connection_id) + .unwrap(), + ); + Ok(response) + } +} + +enum DataFetchSubTaskResult { + BatchSuccess(Vec>), + Success(Vec), + NoResults, +} + +async fn get_data_with_tasks( + start_version: u64, + transactions_count: Option, + chain_id: u64, + cache_operator: &mut CacheOperator, + file_store_operator: Arc>, + request_metadata: Arc, + cache_storage_format: StorageFormat, + in_memory_cache: Arc, +) -> DataFetchSubTaskResult { + let start_time = Instant::now(); + let in_memory_transactions = in_memory_cache.get_transactions(start_version).await; + if !in_memory_transactions.is_empty() { + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceFetchingDataFromInMemoryCache, + Some(start_version as i64), + Some(in_memory_transactions.last().as_ref().unwrap().version as i64), + None, + None, + Some(start_time.elapsed().as_secs_f64()), + None, + Some(in_memory_transactions.len() as i64), + Some(&request_metadata), + ); + return DataFetchSubTaskResult::BatchSuccess(chunk_transactions( + in_memory_transactions, + MESSAGE_SIZE_LIMIT, + )); + } + let cache_coverage_status = cache_operator + .check_cache_coverage_status(start_version) + .await; + + let num_tasks_to_use = match cache_coverage_status { + Ok(CacheCoverageStatus::DataNotReady) => return DataFetchSubTaskResult::NoResults, + Ok(CacheCoverageStatus::CacheHit(_)) => 1, + Ok(CacheCoverageStatus::CacheEvicted) => match transactions_count { + None => MAX_FETCH_TASKS_PER_REQUEST, + Some(transactions_count) => { + let num_tasks = transactions_count / 
TRANSACTIONS_PER_STORAGE_BLOCK; + if num_tasks >= MAX_FETCH_TASKS_PER_REQUEST { + // Limit the max tasks to MAX_FETCH_TASKS_PER_REQUEST + MAX_FETCH_TASKS_PER_REQUEST + } else if num_tasks < 1 { + // Limit the min tasks to 1 + 1 + } else { + num_tasks + } + }, + }, + Err(_) => { + error!("[Data Service] Failed to get cache coverage status."); + panic!("Failed to get cache coverage status."); + }, + }; + + let mut tasks = tokio::task::JoinSet::new(); + let mut current_version = start_version; + + for _ in 0..num_tasks_to_use { + tasks.spawn({ + // TODO: arc this instead of cloning + let mut cache_operator = cache_operator.clone(); + let file_store_operator = file_store_operator.clone(); + let request_metadata = request_metadata.clone(); + async move { + get_data_in_task( + current_version, + chain_id, + &mut cache_operator, + file_store_operator, + request_metadata.clone(), + cache_storage_format, + ) + .await + } + }); + // Storage is in block of 1000: we align our current version fetch to the nearest block + current_version += TRANSACTIONS_PER_STORAGE_BLOCK; + current_version -= current_version % TRANSACTIONS_PER_STORAGE_BLOCK; + } + + let mut transactions: Vec> = vec![]; + while let Some(result) = tasks.join_next().await { + match result { + Ok(DataFetchSubTaskResult::Success(txns)) => { + transactions.push(txns); + }, + Ok(DataFetchSubTaskResult::NoResults) => {}, + Err(e) => { + error!( + error = e.to_string(), + "[Data Service] Failed to get data from cache and file store." + ); + panic!("Failed to get data from cache and file store."); + }, + Ok(_) => unreachable!("Fetching from a single task will never return a batch"), + } + } + + if transactions.is_empty() { + DataFetchSubTaskResult::NoResults + } else { + DataFetchSubTaskResult::BatchSuccess(transactions) + } +} + +async fn get_data_in_task( + start_version: u64, + chain_id: u64, + cache_operator: &mut CacheOperator, + file_store_operator: Arc>, + request_metadata: Arc, + cache_storage_format: StorageFormat, +) -> DataFetchSubTaskResult { + let current_batch_start_time = std::time::Instant::now(); + + let fetched = data_fetch( + start_version, + cache_operator, + file_store_operator, + request_metadata.clone(), + cache_storage_format, + ); + + let transaction_data = match fetched.await { + Ok(TransactionsDataStatus::Success(transactions)) => transactions, + Ok(TransactionsDataStatus::AheadOfCache) => { + info!( + start_version = start_version, + request_identifier = request_metadata.request_identifier.as_str(), + processor_name = request_metadata.processor_name.as_str(), + connection_id = request_metadata.request_connection_id.as_str(), + duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(), + service_type = SERVICE_TYPE, + "[Data Service] Requested data is ahead of cache. Sleeping for {} ms.", + AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS, + ); + ahead_of_cache_data_handling().await; + // Retry after a short sleep. + return DataFetchSubTaskResult::NoResults; + }, + Err(e) => { + ERROR_COUNT.with_label_values(&["data_fetch_failed"]).inc(); + data_fetch_error_handling(e, start_version, chain_id).await; + // Retry after a short sleep. 
+ return DataFetchSubTaskResult::NoResults; + }, + }; + DataFetchSubTaskResult::Success(transaction_data) +} + +// This is a task spawned off for servicing a users' request +async fn data_fetcher_task( + redis_client: Arc, + file_store_operator: Arc>, + cache_storage_format: StorageFormat, + request_metadata: Arc, + transactions_count: Option, + tx: tokio::sync::mpsc::Sender>, + txns_to_strip_filter: BooleanTransactionFilter, + mut current_version: u64, + in_memory_cache: Arc, +) { + let mut connection_start_time = Some(std::time::Instant::now()); + let mut transactions_count = transactions_count; + + // Establish redis connection + let conn = match redis_client.get_tokio_connection_manager().await { + Ok(conn) => conn, + Err(e) => { + ERROR_COUNT + .with_label_values(&["redis_connection_failed"]) + .inc(); + // Connection will be dropped anyway, so we ignore the error here. + let _result = tx + .send_timeout( + Err(Status::unavailable( + "[Data Service] Cannot connect to Redis; please retry.", + )), + RESPONSE_CHANNEL_SEND_TIMEOUT, + ) + .await; + error!( + error = e.to_string(), + "[Data Service] Failed to get redis connection." + ); + return; + }, + }; + let mut cache_operator = CacheOperator::new(conn, cache_storage_format); + + // Validate chain id + let mut metadata = file_store_operator.get_file_store_metadata().await; + while metadata.is_none() { + metadata = file_store_operator.get_file_store_metadata().await; + tracing::warn!( + "[File worker] File store metadata not found. Waiting for {} ms.", + FILE_STORE_METADATA_WAIT_MS + ); + tokio::time::sleep(std::time::Duration::from_millis( + FILE_STORE_METADATA_WAIT_MS, + )) + .await; + } + + let metadata_chain_id = metadata.unwrap().chain_id; + + // Validate redis chain id. Must be present by the time it gets here + let chain_id = match cache_operator.get_chain_id().await { + Ok(chain_id) => chain_id.unwrap(), + Err(e) => { + ERROR_COUNT + .with_label_values(&["redis_get_chain_id_failed"]) + .inc(); + // Connection will be dropped anyway, so we ignore the error here. + let _result = tx + .send_timeout( + Err(Status::unavailable( + "[Data Service] Cannot get the chain id from redis; please retry.", + )), + RESPONSE_CHANNEL_SEND_TIMEOUT, + ) + .await; + error!( + error = e.to_string(), + "[Data Service] Failed to get chain id from redis." + ); + return; + }, + }; + + if metadata_chain_id != chain_id { + let _result = tx + .send_timeout( + Err(Status::unavailable("[Data Service] Chain ID mismatch.")), + RESPONSE_CHANNEL_SEND_TIMEOUT, + ) + .await; + error!("[Data Service] Chain ID mismatch.",); + return; + } + + // Data service metrics. + let mut tps_calculator = MovingAverage::new(MOVING_AVERAGE_WINDOW_SIZE); + + loop { + // 1. Fetch data from cache and file store. + let transaction_data = match get_data_with_tasks( + current_version, + transactions_count, + chain_id, + &mut cache_operator, + file_store_operator.clone(), + request_metadata.clone(), + cache_storage_format, + in_memory_cache.clone(), + ) + .await + { + DataFetchSubTaskResult::BatchSuccess(txns) => txns, + DataFetchSubTaskResult::Success(_) => { + unreachable!("Fetching from multiple tasks will never return a single vector") + }, + DataFetchSubTaskResult::NoResults => continue, + }; + + let mut transaction_data = ensure_sequential_transactions(transaction_data); + + // TODO: Unify the truncation logic for start and end. + if let Some(count) = transactions_count { + if count == 0 { + // End the data stream. 
+ // Since the client receives all the data it requested, we don't count it as a short connection. + connection_start_time = None; + break; + } else if (count as usize) < transaction_data.len() { + // Trim the data to the requested end version. + transaction_data.truncate(count as usize); + transactions_count = Some(0); + } else { + transactions_count = Some(count - transaction_data.len() as u64); + } + }; + // Note: this is the protobuf encoded transaction size. + let bytes_ready_to_transfer = transaction_data + .iter() + .map(|t| t.encoded_len()) + .sum::(); + BYTES_READY_TO_TRANSFER_FROM_SERVER + .with_label_values(&request_metadata.get_label_values()) + .inc_by(bytes_ready_to_transfer as u64); + // 2. Push the data to the response channel, i.e. stream the data to the client. + let current_batch_size = transaction_data.as_slice().len(); + let end_of_batch_version = transaction_data.as_slice().last().unwrap().version; + let (resp_items, num_stripped) = get_transactions_responses_builder( + transaction_data, + chain_id as u32, + &txns_to_strip_filter, + ); + NUM_TRANSACTIONS_STRIPPED + .with_label_values(&request_metadata.get_label_values()) + .inc_by(num_stripped as u64); + let bytes_ready_to_transfer_after_stripping = resp_items + .iter() + .flat_map(|response| &response.transactions) + .map(|t| t.encoded_len()) + .sum::(); + BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING + .with_label_values(&request_metadata.get_label_values()) + .inc_by(bytes_ready_to_transfer_after_stripping as u64); + let data_latency_in_secs = resp_items + .last() + .unwrap() + .transactions + .last() + .unwrap() + .timestamp + .as_ref() + .map(time_diff_since_pb_timestamp_in_secs); + + match channel_send_multiple_with_timeout(resp_items, tx.clone(), request_metadata.clone()) + .await + { + Ok(_) => { + // TODO: Reasses whether this metric is useful. + LATEST_PROCESSED_VERSION_PER_PROCESSOR + .with_label_values(&request_metadata.get_label_values()) + .set(end_of_batch_version as i64); + PROCESSED_VERSIONS_COUNT_PER_PROCESSOR + .with_label_values(&request_metadata.get_label_values()) + .inc_by(current_batch_size as u64); + if let Some(data_latency_in_secs) = data_latency_in_secs { + PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR + .with_label_values(&request_metadata.get_label_values()) + .set(data_latency_in_secs); + } + }, + Err(SendTimeoutError::Timeout(_)) => { + warn!("[Data Service] Receiver is full; exiting."); + break; + }, + Err(SendTimeoutError::Closed(_)) => { + warn!("[Data Service] Receiver is closed; exiting."); + break; + }, + } + // 3. Update the current version and record current tps. + tps_calculator.tick_now(current_batch_size as u64); + current_version = end_of_batch_version + 1; + } + info!( + request_identifier = request_metadata.request_identifier.as_str(), + processor_name = request_metadata.processor_name.as_str(), + connection_id = request_metadata.request_connection_id.as_str(), + service_type = SERVICE_TYPE, + "[Data Service] Client disconnected." + ); + if let Some(start_time) = connection_start_time { + if start_time.elapsed().as_secs() < SHORT_CONNECTION_DURATION_IN_SECS { + SHORT_CONNECTION_COUNT + .with_label_values(&request_metadata.get_label_values()) + .inc(); + } + } +} + +/// Takes in multiple batches of transactions, and: +/// 1. De-dupes in the case of overlap (but log to prom metric) +/// 2. 
Panics in cases of gaps
+fn ensure_sequential_transactions(mut batches: Vec<Vec<Transaction>>) -> Vec<Transaction> {
+    // If there's only one, no sorting required
+    if batches.len() == 1 {
+        return batches.pop().unwrap();
+    }
+
+    // Sort by the first version per batch, ascending
+    batches.sort_by(|a, b| a.first().unwrap().version.cmp(&b.first().unwrap().version));
+    let first_version = batches.first().unwrap().first().unwrap().version;
+    let last_version = batches.last().unwrap().last().unwrap().version;
+    let mut transactions: Vec<Transaction> = vec![];
+
+    let mut prev_start = None;
+    let mut prev_end = None;
+    for mut batch in batches {
+        let mut start_version = batch.first().unwrap().version;
+        let end_version = batch.last().unwrap().version;
+        if prev_start.is_some() {
+            let prev_start = prev_start.unwrap();
+            let prev_end = prev_end.unwrap();
+            // If this batch is fully contained within the previous batch, skip it
+            if prev_start <= start_version && prev_end >= end_version {
+                NUM_MULTI_FETCH_OVERLAPPED_VERSIONS
+                    .with_label_values(&[SERVICE_TYPE, "full"])
+                    .inc_by(end_version - start_version);
+                continue;
+            }
+            // If this batch overlaps with the previous batch, combine them
+            if prev_end >= start_version {
+                NUM_MULTI_FETCH_OVERLAPPED_VERSIONS
+                    .with_label_values(&[SERVICE_TYPE, "partial"])
+                    .inc_by(prev_end - start_version + 1);
+                tracing::debug!(
+                    batch_first_version = first_version,
+                    batch_last_version = last_version,
+                    start_version = start_version,
+                    end_version = end_version,
+                    prev_start = ?prev_start,
+                    prev_end = prev_end,
+                    "[Filestore] Overlapping version data"
+                );
+                batch.drain(0..(prev_end - start_version + 1) as usize);
+                start_version = batch.first().unwrap().version;
+            }
+
+            // Otherwise there is a gap
+            if prev_end + 1 != start_version {
+                NUM_MULTI_FETCH_OVERLAPPED_VERSIONS
+                    .with_label_values(&[SERVICE_TYPE, "gap"])
+                    // Record the gap size; subtracting in the other direction would
+                    // underflow, since start_version > prev_end + 1 in this branch.
+                    .inc_by(start_version - prev_end - 1);
+
+                tracing::error!(
+                    batch_first_version = first_version,
+                    batch_last_version = last_version,
+                    start_version = start_version,
+                    end_version = end_version,
+                    prev_start = ?prev_start,
+                    prev_end = prev_end,
+                    "[Filestore] Gaps or dupes in processing version data"
+                );
+                panic!("[Filestore] Gaps in processing data batch_first_version: {}, batch_last_version: {}, start_version: {}, end_version: {}, prev_start: {:?}, prev_end: {:?}",
+                    first_version,
+                    last_version,
+                    start_version,
+                    end_version,
+                    prev_start,
+                    prev_end,
+                );
+            }
+        }
+
+        prev_start = Some(start_version);
+        prev_end = Some(end_version);
+        transactions.extend(batch);
+    }
+
+    transactions
+}
+
+/// Builds the response for the get transactions request. Partial batch is ok, i.e., a
+/// batch with transactions < 1000.
+///
+/// It also returns the number of txns that were stripped.
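+///
+/// A minimal sketch of the expected shape (hypothetical inputs):
+///
+/// ```ignore
+/// let (responses, num_stripped) =
+///     get_transactions_responses_builder(transactions, 1 /* chain_id */, &filter);
+/// assert!(responses.iter().all(|r| r.chain_id == Some(1)));
+/// ```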
+fn get_transactions_responses_builder( + transactions: Vec, + chain_id: u32, + txns_to_strip_filter: &BooleanTransactionFilter, +) -> (Vec, usize) { + let (stripped_transactions, num_stripped) = + strip_transactions(transactions, txns_to_strip_filter); + let chunks = chunk_transactions(stripped_transactions, MESSAGE_SIZE_LIMIT); + let responses = chunks + .into_iter() + .map(|chunk| TransactionsResponse { + chain_id: Some(chain_id as u64), + transactions: chunk, + }) + .collect(); + (responses, num_stripped) +} + +// This is a CPU bound operation, so we spawn_blocking +async fn deserialize_cached_transactions( + transactions: Vec>, + storage_format: StorageFormat, +) -> anyhow::Result> { + let task = tokio::task::spawn_blocking(move || { + transactions + .into_iter() + .map(|transaction| { + let cache_entry = CacheEntry::new(transaction, storage_format); + cache_entry.into_transaction() + }) + .collect::>() + }) + .await; + task.context("Transaction bytes to CacheEntry deserialization task failed") +} + +/// Fetches data from cache or the file store. It returns the data if it is ready in the cache or file store. +/// Otherwise, it returns the status of the data fetching. +async fn data_fetch( + starting_version: u64, + cache_operator: &mut CacheOperator, + file_store_operator: Arc>, + request_metadata: Arc, + storage_format: StorageFormat, +) -> anyhow::Result { + let current_batch_start_time = std::time::Instant::now(); + let batch_get_result = cache_operator + .batch_get_encoded_proto_data(starting_version) + .await; + + match batch_get_result { + // Data is not ready yet in the cache. + Ok(CacheBatchGetStatus::NotReady) => Ok(TransactionsDataStatus::AheadOfCache), + Ok(CacheBatchGetStatus::Ok(transactions)) => { + let decoding_start_time = std::time::Instant::now(); + let size_in_bytes = transactions + .iter() + .map(|transaction| transaction.len()) + .sum::(); + let num_of_transactions = transactions.len(); + let duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(); + + let transactions = + deserialize_cached_transactions(transactions, storage_format).await?; + let start_version_timestamp = transactions.first().unwrap().timestamp.as_ref(); + let end_version_timestamp = transactions.last().unwrap().timestamp.as_ref(); + + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceDataFetchedCache, + Some(starting_version as i64), + Some(starting_version as i64 + num_of_transactions as i64 - 1), + start_version_timestamp, + end_version_timestamp, + Some(duration_in_secs), + Some(size_in_bytes), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceTxnsDecoded, + Some(starting_version as i64), + Some(starting_version as i64 + num_of_transactions as i64 - 1), + start_version_timestamp, + end_version_timestamp, + Some(decoding_start_time.elapsed().as_secs_f64()), + Some(size_in_bytes), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + + Ok(TransactionsDataStatus::Success(transactions)) + }, + Ok(CacheBatchGetStatus::EvictedFromCache) => { + let transactions = + data_fetch_from_filestore(starting_version, file_store_operator, request_metadata) + .await?; + Ok(TransactionsDataStatus::Success(transactions)) + }, + Err(e) => Err(e), + } +} + +async fn data_fetch_from_filestore( + starting_version: u64, + file_store_operator: Arc>, + request_metadata: Arc, +) -> anyhow::Result> { + // Data is evicted from the cache. Fetch from file store. 
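+    // Retries up to NUM_DATA_FETCH_RETRIES times inside the operator and returns the
+    // decoded transactions along with the IO and decoding durations, logged below.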
+ let (transactions, io_duration, decoding_duration) = file_store_operator + .get_transactions_with_durations(starting_version, NUM_DATA_FETCH_RETRIES) + .await?; + let size_in_bytes = transactions + .iter() + .map(|transaction| transaction.encoded_len()) + .sum::(); + let num_of_transactions = transactions.len(); + let start_version_timestamp = transactions.first().unwrap().timestamp.as_ref(); + let end_version_timestamp = transactions.last().unwrap().timestamp.as_ref(); + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceDataFetchedFilestore, + Some(starting_version as i64), + Some(starting_version as i64 + num_of_transactions as i64 - 1), + start_version_timestamp, + end_version_timestamp, + Some(io_duration), + Some(size_in_bytes), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceTxnsDecoded, + Some(starting_version as i64), + Some(starting_version as i64 + num_of_transactions as i64 - 1), + start_version_timestamp, + end_version_timestamp, + Some(decoding_duration), + Some(size_in_bytes), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + Ok(transactions) +} + +/// Handles the case when the data is not ready in the cache, i.e., beyond the current head. +async fn ahead_of_cache_data_handling() { + // TODO: add exponential backoff. + tokio::time::sleep(Duration::from_millis( + AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS, + )) + .await; +} + +/// Handles data fetch errors, including cache and file store related errors. +async fn data_fetch_error_handling(err: anyhow::Error, current_version: u64, chain_id: u64) { + error!( + chain_id = chain_id, + current_version = current_version, + "[Data Service] Failed to fetch data from cache and file store. {:?}", + err + ); + tokio::time::sleep(Duration::from_millis( + TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS, + )) + .await; +} + +/// Gets the request metadata. Useful for logging. +fn get_request_metadata( + req: &Request, +) -> tonic::Result { + let request_metadata_pairs = vec![ + ( + "request_identifier_type", + REQUEST_HEADER_APTOS_IDENTIFIER_TYPE, + ), + ("request_identifier", REQUEST_HEADER_APTOS_IDENTIFIER), + ("request_email", REQUEST_HEADER_APTOS_EMAIL), + ( + "request_application_name", + REQUEST_HEADER_APTOS_APPLICATION_NAME, + ), + ("request_token", GRPC_AUTH_TOKEN_HEADER), + ("processor_name", GRPC_REQUEST_NAME_HEADER), + ]; + let mut request_metadata_map: HashMap = request_metadata_pairs + .into_iter() + .map(|(key, value)| { + ( + key.to_string(), + req.metadata() + .get(value) + .map(|value| value.to_str().unwrap_or("unspecified").to_string()) + .unwrap_or("unspecified".to_string()), + ) + }) + .collect(); + request_metadata_map.insert( + "request_connection_id".to_string(), + Uuid::new_v4().to_string(), + ); + let request_metadata: IndexerGrpcRequestMetadata = + serde_json::from_str(&serde_json::to_string(&request_metadata_map).unwrap()).unwrap(); + // TODO: update the request name if these are internal requests. 
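+    // At this point the map holds one entry per known header (falling back to
+    // "unspecified") plus the generated `request_connection_id`; e.g. (hypothetical):
+    // request_identifier = "abc123", processor_name = "default_processor".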
+ Ok(request_metadata) +} + +async fn channel_send_multiple_with_timeout( + resp_items: Vec, + tx: tokio::sync::mpsc::Sender>, + request_metadata: Arc, +) -> Result<(), SendTimeoutError>> { + let overall_send_start_time = Instant::now(); + let overall_size_in_bytes = resp_items + .iter() + .map(|resp_item| resp_item.encoded_len()) + .sum::(); + let overall_start_txn = resp_items.first().unwrap().transactions.first().unwrap(); + let overall_end_txn = resp_items.last().unwrap().transactions.last().unwrap(); + let overall_start_version = overall_start_txn.version; + let overall_end_version = overall_end_txn.version; + let overall_start_txn_timestamp = overall_start_txn.clone().timestamp; + let overall_end_txn_timestamp = overall_end_txn.clone().timestamp; + + for resp_item in resp_items { + let send_start_time = Instant::now(); + let response_size = resp_item.encoded_len(); + let num_of_transactions = resp_item.transactions.len(); + let start_version = resp_item.transactions.first().unwrap().version; + let end_version = resp_item.transactions.last().unwrap().version; + let start_version_txn_timestamp = resp_item + .transactions + .first() + .unwrap() + .timestamp + .as_ref() + .unwrap(); + let end_version_txn_timestamp = resp_item + .transactions + .last() + .unwrap() + .timestamp + .as_ref() + .unwrap(); + + tx.send_timeout( + Result::::Ok(resp_item.clone()), + RESPONSE_CHANNEL_SEND_TIMEOUT, + ) + .await?; + + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceChunkSent, + Some(start_version as i64), + Some(end_version as i64), + Some(start_version_txn_timestamp), + Some(end_version_txn_timestamp), + Some(send_start_time.elapsed().as_secs_f64()), + Some(response_size), + Some(num_of_transactions as i64), + Some(&request_metadata), + ); + } + + log_grpc_step( + SERVICE_TYPE, + IndexerGrpcStep::DataServiceAllChunksSent, + Some(overall_start_version as i64), + Some(overall_end_version as i64), + overall_start_txn_timestamp.as_ref(), + overall_end_txn_timestamp.as_ref(), + Some(overall_send_start_time.elapsed().as_secs_f64()), + Some(overall_size_in_bytes), + Some((overall_end_version - overall_start_version + 1) as i64), + Some(&request_metadata), + ); + + Ok(()) +} + +/// This function strips transactions that match the given filter. Stripping means we +/// remove the payload, signature, events, and writesets. Note, the filter can be +/// composed of many conditions, see `BooleanTransactionFilter` for more. +/// +/// This returns the mutated txns and the number of txns that were stripped. +fn strip_transactions( + transactions: Vec, + txns_to_strip_filter: &BooleanTransactionFilter, +) -> (Vec, usize) { + let mut stripped_count = 0; + + let stripped_transactions: Vec = transactions + .into_iter() + .map(|mut txn| { + // Note: `is_allowed` means the txn matches the filter, in which case + // we strip it. + if txns_to_strip_filter.is_allowed(&txn) { + stripped_count += 1; + if let Some(info) = txn.info.as_mut() { + info.changes = vec![]; + } + if let Some(TxnData::User(user_transaction)) = txn.txn_data.as_mut() { + user_transaction.events = vec![]; + if let Some(utr) = user_transaction.request.as_mut() { + // Wipe the payload and signature. 
+ utr.payload = None; + utr.signature = None; + } + } + } + txn + }) + .collect(); + + (stripped_transactions, stripped_count) +} + +#[cfg(test)] +mod tests { + use super::*; + use aptos_protos::transaction::v1::{ + transaction::TxnData, transaction_payload::Payload, EntryFunctionId, EntryFunctionPayload, + Event, MoveModuleId, Signature, Transaction, TransactionInfo, TransactionPayload, + UserTransaction, UserTransactionRequest, WriteSetChange, + }; + use aptos_transaction_filter::{ + boolean_transaction_filter::APIFilter, filters::UserTransactionFilterBuilder, + EntryFunctionFilterBuilder, UserTransactionPayloadFilterBuilder, + }; + + fn create_test_transaction( + module_address: String, + module_name: String, + function_name: String, + ) -> Transaction { + Transaction { + version: 1, + txn_data: Some(TxnData::User(UserTransaction { + request: Some(UserTransactionRequest { + payload: Some(TransactionPayload { + r#type: 1, + payload: Some(Payload::EntryFunctionPayload(EntryFunctionPayload { + function: Some(EntryFunctionId { + module: Some(MoveModuleId { + address: module_address, + name: module_name, + }), + name: function_name, + }), + ..Default::default() + })), + }), + signature: Some(Signature::default()), + ..Default::default() + }), + events: vec![Event::default()], + })), + info: Some(TransactionInfo { + changes: vec![WriteSetChange::default()], + ..Default::default() + }), + ..Default::default() + } + } + + #[test] + fn test_ensure_sequential_transactions_merges_and_sorts() { + let transactions1 = (1..5) + .map(|i| Transaction { + version: i, + ..Default::default() + }) + .collect(); + let transactions2 = (5..10) + .map(|i| Transaction { + version: i, + ..Default::default() + }) + .collect(); + // No overlap, just normal fetching flow + let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); + assert_eq!(transactions1.len(), 9); + assert_eq!(transactions1.first().unwrap().version, 1); + assert_eq!(transactions1.last().unwrap().version, 9); + + // This is a full overlap + let transactions2 = (5..7) + .map(|i| Transaction { + version: i, + ..Default::default() + }) + .collect(); + let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); + assert_eq!(transactions1.len(), 9); + assert_eq!(transactions1.first().unwrap().version, 1); + assert_eq!(transactions1.last().unwrap().version, 9); + + // Partial overlap + let transactions2 = (5..12) + .map(|i| Transaction { + version: i, + ..Default::default() + }) + .collect(); + let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); + assert_eq!(transactions1.len(), 11); + assert_eq!(transactions1.first().unwrap().version, 1); + assert_eq!(transactions1.last().unwrap().version, 11); + } + + const MODULE_ADDRESS: &str = "0x1234"; + const MODULE_NAME: &str = "module"; + const FUNCTION_NAME: &str = "function"; + + #[test] + fn test_transactions_are_stripped_correctly_sender_addresses() { + let sender_address = "0x1234".to_string(); + // Create a transaction with a user transaction + let txn = Transaction { + version: 1, + txn_data: Some(TxnData::User(UserTransaction { + request: Some(UserTransactionRequest { + sender: sender_address.clone(), + payload: Some(TransactionPayload::default()), + signature: Some(Signature::default()), + ..Default::default() + }), + events: vec![Event::default()], + })), + info: Some(TransactionInfo { + changes: vec![WriteSetChange::default()], + ..Default::default() + }), + ..Default::default() + }; + + // Create filter for 
senders to ignore. + let sender_filters = vec![sender_address] + .into_iter() + .map(|address| { + BooleanTransactionFilter::from(APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .sender(address) + .build() + .unwrap(), + )) + }) + .collect(); + let filter = BooleanTransactionFilter::new_or(sender_filters); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn], &filter); + assert_eq!(num_stripped, 1); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); + assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); + assert_eq!(user_transaction.events.len(), 0); + assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + } + + #[test] + fn test_transactions_are_stripped_correctly_module_address() { + let txn = create_test_transaction( + MODULE_ADDRESS.to_string(), + MODULE_NAME.to_string(), + FUNCTION_NAME.to_string(), + ); + // Testing filter with only address set + let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( + APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .payload( + UserTransactionPayloadFilterBuilder::default() + .function( + EntryFunctionFilterBuilder::default() + .address(MODULE_ADDRESS.to_string()) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ), + )]); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); + assert_eq!(num_stripped, 1); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); + assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); + assert_eq!(user_transaction.events.len(), 0); + assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + } + + #[test] + fn test_transactions_are_stripped_correctly_module_name() { + let txn = create_test_transaction( + MODULE_ADDRESS.to_string(), + MODULE_NAME.to_string(), + FUNCTION_NAME.to_string(), + ); + // Testing filter with only module set + let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( + APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .payload( + UserTransactionPayloadFilterBuilder::default() + .function( + EntryFunctionFilterBuilder::default() + .module(MODULE_NAME.to_string()) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ), + )]); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); + assert_eq!(num_stripped, 1); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); + assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); + assert_eq!(user_transaction.events.len(), 0); + assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + } + + #[test] + fn test_transactions_are_stripped_correctly_function_name() { + 
let txn = create_test_transaction( + MODULE_ADDRESS.to_string(), + MODULE_NAME.to_string(), + FUNCTION_NAME.to_string(), + ); + // Testing filter with only function set + let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( + APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .payload( + UserTransactionPayloadFilterBuilder::default() + .function( + EntryFunctionFilterBuilder::default() + .function(FUNCTION_NAME.to_string()) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ), + )]); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); + assert_eq!(num_stripped, 1); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); + assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); + assert_eq!(user_transaction.events.len(), 0); + assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + } + #[test] + fn test_transactions_are_not_stripped() { + let txn = create_test_transaction( + MODULE_ADDRESS.to_string(), + MODULE_NAME.to_string(), + FUNCTION_NAME.to_string(), + ); + // Testing filter with wrong filter + let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( + APIFilter::UserTransactionFilter( + UserTransactionFilterBuilder::default() + .payload( + UserTransactionPayloadFilterBuilder::default() + .function( + EntryFunctionFilterBuilder::default() + .function("0xrandom".to_string()) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ) + .build() + .unwrap(), + ), + )]); + + let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); + assert_eq!(num_stripped, 0); + assert_eq!(filtered_txns.len(), 1); + let txn = filtered_txns.first().unwrap(); + let user_transaction = match &txn.txn_data { + Some(TxnData::User(user_transaction)) => user_transaction, + _ => panic!("Expected user transaction"), + }; + assert_ne!(user_transaction.request.as_ref().unwrap().payload, None); + assert_ne!(user_transaction.request.as_ref().unwrap().signature, None); + assert_ne!(user_transaction.events.len(), 0); + assert_ne!(txn.info.as_ref().unwrap().changes.len(), 0); + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs index f2faf42408631..2f73af9916149 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs @@ -1,1319 +1,163 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::metrics::{ - BYTES_READY_TO_TRANSFER_FROM_SERVER, BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING, - CONNECTION_COUNT, ERROR_COUNT, LATEST_PROCESSED_VERSION_PER_PROCESSOR, - NUM_TRANSACTIONS_STRIPPED, PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR, - PROCESSED_VERSIONS_COUNT_PER_PROCESSOR, SHORT_CONNECTION_COUNT, +use crate::connection_manager::ConnectionManager; +use anyhow::Result; +use aptos_indexer_grpc_utils::timestamp_now_proto; +use aptos_protos::indexer::v1::{ + data_service_server::DataService, raw_data_server::RawData, DataServiceInfo, + GetTransactionsRequest, PingDataServiceRequest, PingDataServiceResponse, StreamInfo, + TransactionsResponse, }; -use anyhow::{Context, 
Result}; -use aptos_indexer_grpc_utils::{ - cache_operator::{CacheBatchGetStatus, CacheCoverageStatus, CacheOperator}, - chunk_transactions, - compression_util::{CacheEntry, StorageFormat}, - config::IndexerGrpcFileStoreConfig, - constants::{ - IndexerGrpcRequestMetadata, GRPC_AUTH_TOKEN_HEADER, GRPC_REQUEST_NAME_HEADER, - MESSAGE_SIZE_LIMIT, REQUEST_HEADER_APTOS_APPLICATION_NAME, REQUEST_HEADER_APTOS_EMAIL, - REQUEST_HEADER_APTOS_IDENTIFIER, REQUEST_HEADER_APTOS_IDENTIFIER_TYPE, - }, - counters::{log_grpc_step, IndexerGrpcStep, NUM_MULTI_FETCH_OVERLAPPED_VERSIONS}, - file_store_operator::FileStoreOperator, - in_memory_cache::InMemoryCache, - time_diff_since_pb_timestamp_in_secs, - types::RedisUrl, -}; -use aptos_moving_average::MovingAverage; -use aptos_protos::{ - indexer::v1::{raw_data_server::RawData, GetTransactionsRequest, TransactionsResponse}, - transaction::v1::{transaction::TxnData, Transaction}, -}; -use aptos_transaction_filter::{BooleanTransactionFilter, Filterable}; -use futures::Stream; -use prost::Message; -use redis::Client; -use std::{ - collections::HashMap, - pin::Pin, - str::FromStr, - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::sync::mpsc::{channel, error::SendTimeoutError}; +use futures::{Stream, StreamExt}; +use std::{pin::Pin, sync::Arc}; +use tokio::sync::mpsc::{channel, Sender}; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status}; -use tracing::{error, info, warn}; -use uuid::Uuid; type ResponseStream = Pin> + Send>>; -const MOVING_AVERAGE_WINDOW_SIZE: u64 = 10_000; -// When trying to fetch beyond the current head of cache, the server will retry after this duration. -const AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS: u64 = 50; -// When error happens when fetching data from cache and file store, the server will retry after this duration. -// TODO(larry): fix all errors treated as transient errors. -const TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS: u64 = 1000; -// This is the time we wait for the file store to be ready. It should only be -// kicked off when there's no metadata in the file store. -const FILE_STORE_METADATA_WAIT_MS: u64 = 2000; - -// The server will retry to send the response to the client and give up after RESPONSE_CHANNEL_SEND_TIMEOUT. -// This is to prevent the server from being occupied by a slow client. -const RESPONSE_CHANNEL_SEND_TIMEOUT: Duration = Duration::from_secs(120); - -const SHORT_CONNECTION_DURATION_IN_SECS: u64 = 10; - -const RESPONSE_HEADER_APTOS_CONNECTION_ID_HEADER: &str = "x-aptos-connection-id"; -const SERVICE_TYPE: &str = "data_service"; - -// Number of times to retry fetching a given txn block from the stores -pub const NUM_DATA_FETCH_RETRIES: u8 = 5; - -// Max number of tasks to reach out to TXN stores with -const MAX_FETCH_TASKS_PER_REQUEST: u64 = 5; -// The number of transactions we store per txn block; this is used to determine max num of tasks -const TRANSACTIONS_PER_STORAGE_BLOCK: u64 = 1000; - -pub struct RawDataServerWrapper { - pub redis_client: Arc, - pub file_store_config: IndexerGrpcFileStoreConfig, - pub data_service_response_channel_size: usize, - pub txns_to_strip_filter: BooleanTransactionFilter, - pub cache_storage_format: StorageFormat, - in_memory_cache: Arc, +// Note: For now we still allow starting both services together, so people don't have to rely on +// GrpcManager for routing. 
+pub struct DataServiceWrapperWrapper { + live_data_service: Option, + historical_data_service: Option, } -// Exclude in_memory-cache -impl std::fmt::Debug for RawDataServerWrapper { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("RawDataServerWrapper") - .field("redis_client", &"Arc") - .field("file_store_config", &self.file_store_config) - .field( - "data_service_response_channel_size", - &self.data_service_response_channel_size, - ) - .field("txns_to_strip_filter", &self.txns_to_strip_filter) - .field("cache_storage_format", &self.cache_storage_format) - .finish() - } -} - -impl RawDataServerWrapper { +impl DataServiceWrapperWrapper { pub fn new( - redis_address: RedisUrl, - file_store_config: IndexerGrpcFileStoreConfig, - data_service_response_channel_size: usize, - txns_to_strip_filter: BooleanTransactionFilter, - cache_storage_format: StorageFormat, - in_memory_cache: Arc, - ) -> anyhow::Result { - Ok(Self { - redis_client: Arc::new( - redis::Client::open(redis_address.0.clone()).with_context(|| { - format!("Failed to create redis client for {}", redis_address) - })?, - ), - file_store_config, - data_service_response_channel_size, - txns_to_strip_filter, - cache_storage_format, - in_memory_cache, - }) + live_data_service: Option, + historical_data_service: Option, + ) -> Self { + Self { + live_data_service, + historical_data_service, + } } } -/// Enum to represent the status of the data fetching overall. -enum TransactionsDataStatus { - // Data fetching is successful. - Success(Vec), - // Ahead of current head of cache. - AheadOfCache, -} - -/// RawDataServerWrapper handles the get transactions requests from cache and file store. #[tonic::async_trait] -impl RawData for RawDataServerWrapper { +impl DataService for DataServiceWrapperWrapper { type GetTransactionsStream = ResponseStream; - /// GetTransactionsStream is a streaming GRPC endpoint: - /// 1. Fetches data from cache and file store. - /// 1.1. If the data is beyond the current head of cache, retry after a short sleep. - /// 1.2. If the data is not in cache, fetch the data from file store. - /// 1.3. If the data is not in file store, stream connection will break. - /// 1.4 If error happens, retry after a short sleep. - /// 2. Push data into channel to stream to the client. - /// 2.1. If the channel is full, do not fetch and retry after a short sleep. async fn get_transactions( &self, req: Request, ) -> Result, Status> { - // Get request identity. The request is already authenticated by the interceptor. - let request_metadata = match get_request_metadata(&req) { - Ok(request_metadata) => request_metadata, - _ => return Result::Err(Status::aborted("Invalid request token")), - }; - CONNECTION_COUNT - .with_label_values(&request_metadata.get_label_values()) - .inc(); - let request = req.into_inner(); - - let transactions_count = request.transactions_count; - - // Response channel to stream the data to the client. - let (tx, rx) = channel(self.data_service_response_channel_size); - let current_version = match &request.starting_version { - Some(version) => *version, - // Live mode if starting version isn't specified - None => self - .in_memory_cache - .latest_version() - .await - .saturating_sub(1), - }; - - let file_store_operator: Box = self.file_store_config.create(); - let file_store_operator = Arc::new(file_store_operator); - - // Adds tracing context for the request. 
- log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceNewRequestReceived, - Some(current_version as i64), - transactions_count.map(|v| (v as i64 + current_version as i64 - 1)), - None, - None, - None, - None, - None, - Some(&request_metadata), - ); - - let redis_client = self.redis_client.clone(); - let cache_storage_format = self.cache_storage_format; - let request_metadata = Arc::new(request_metadata); - let txns_to_strip_filter = self.txns_to_strip_filter.clone(); - let in_memory_cache = self.in_memory_cache.clone(); - tokio::spawn({ - let request_metadata = request_metadata.clone(); - async move { - data_fetcher_task( - redis_client, - file_store_operator, - cache_storage_format, - request_metadata, - transactions_count, - tx, - txns_to_strip_filter, - current_version, - in_memory_cache, - ) - .await; - } - }); - - let output_stream = ReceiverStream::new(rx); - let mut response = Response::new(Box::pin(output_stream) as Self::GetTransactionsStream); - - response.metadata_mut().insert( - RESPONSE_HEADER_APTOS_CONNECTION_ID_HEADER, - tonic::metadata::MetadataValue::from_str(&request_metadata.request_connection_id) - .unwrap(), - ); - Ok(response) - } -} - -enum DataFetchSubTaskResult { - BatchSuccess(Vec>), - Success(Vec), - NoResults, -} - -async fn get_data_with_tasks( - start_version: u64, - transactions_count: Option, - chain_id: u64, - cache_operator: &mut CacheOperator, - file_store_operator: Arc>, - request_metadata: Arc, - cache_storage_format: StorageFormat, - in_memory_cache: Arc, -) -> DataFetchSubTaskResult { - let start_time = Instant::now(); - let in_memory_transactions = in_memory_cache.get_transactions(start_version).await; - if !in_memory_transactions.is_empty() { - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceFetchingDataFromInMemoryCache, - Some(start_version as i64), - Some(in_memory_transactions.last().as_ref().unwrap().version as i64), - None, - None, - Some(start_time.elapsed().as_secs_f64()), - None, - Some(in_memory_transactions.len() as i64), - Some(&request_metadata), - ); - return DataFetchSubTaskResult::BatchSuccess(chunk_transactions( - in_memory_transactions, - MESSAGE_SIZE_LIMIT, - )); - } - let cache_coverage_status = cache_operator - .check_cache_coverage_status(start_version) - .await; - - let num_tasks_to_use = match cache_coverage_status { - Ok(CacheCoverageStatus::DataNotReady) => return DataFetchSubTaskResult::NoResults, - Ok(CacheCoverageStatus::CacheHit(_)) => 1, - Ok(CacheCoverageStatus::CacheEvicted) => match transactions_count { - None => MAX_FETCH_TASKS_PER_REQUEST, - Some(transactions_count) => { - let num_tasks = transactions_count / TRANSACTIONS_PER_STORAGE_BLOCK; - if num_tasks >= MAX_FETCH_TASKS_PER_REQUEST { - // Limit the max tasks to MAX_FETCH_TASKS_PER_REQUEST - MAX_FETCH_TASKS_PER_REQUEST - } else if num_tasks < 1 { - // Limit the min tasks to 1 - 1 - } else { - num_tasks + if let Some(live_data_service) = self.live_data_service.as_ref() { + if let Some(historical_data_service) = self.historical_data_service.as_ref() { + let request = req.into_inner(); + let mut stream = live_data_service + .get_transactions(Request::new(request.clone())) + .await? 
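+                // Probe the live data service first: peek at its stream, and if it
+                // yields data immediately, serve the request from the live path;
+                // otherwise fall through to the historical data service below.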
+ .into_inner(); + let peekable = std::pin::pin!(stream.as_mut().peekable()); + if let Some(Ok(_)) = peekable.peek().await { + return live_data_service + .get_transactions(Request::new(request.clone())) + .await; } - }, - }, - Err(_) => { - error!("[Data Service] Failed to get cache coverage status."); - panic!("Failed to get cache coverage status."); - }, - }; - - let mut tasks = tokio::task::JoinSet::new(); - let mut current_version = start_version; - - for _ in 0..num_tasks_to_use { - tasks.spawn({ - // TODO: arc this instead of cloning - let mut cache_operator = cache_operator.clone(); - let file_store_operator = file_store_operator.clone(); - let request_metadata = request_metadata.clone(); - async move { - get_data_in_task( - current_version, - chain_id, - &mut cache_operator, - file_store_operator, - request_metadata.clone(), - cache_storage_format, - ) - .await - } - }); - // Storage is in block of 1000: we align our current version fetch to the nearest block - current_version += TRANSACTIONS_PER_STORAGE_BLOCK; - current_version -= current_version % TRANSACTIONS_PER_STORAGE_BLOCK; - } - - let mut transactions: Vec> = vec![]; - while let Some(result) = tasks.join_next().await { - match result { - Ok(DataFetchSubTaskResult::Success(txns)) => { - transactions.push(txns); - }, - Ok(DataFetchSubTaskResult::NoResults) => {}, - Err(e) => { - error!( - error = e.to_string(), - "[Data Service] Failed to get data from cache and file store." - ); - panic!("Failed to get data from cache and file store."); - }, - Ok(_) => unreachable!("Fetching from a single task will never return a batch"), - } - } - - if transactions.is_empty() { - DataFetchSubTaskResult::NoResults - } else { - DataFetchSubTaskResult::BatchSuccess(transactions) - } -} - -async fn get_data_in_task( - start_version: u64, - chain_id: u64, - cache_operator: &mut CacheOperator, - file_store_operator: Arc>, - request_metadata: Arc, - cache_storage_format: StorageFormat, -) -> DataFetchSubTaskResult { - let current_batch_start_time = std::time::Instant::now(); - - let fetched = data_fetch( - start_version, - cache_operator, - file_store_operator, - request_metadata.clone(), - cache_storage_format, - ); - - let transaction_data = match fetched.await { - Ok(TransactionsDataStatus::Success(transactions)) => transactions, - Ok(TransactionsDataStatus::AheadOfCache) => { - info!( - start_version = start_version, - request_identifier = request_metadata.request_identifier.as_str(), - processor_name = request_metadata.processor_name.as_str(), - connection_id = request_metadata.request_connection_id.as_str(), - duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(), - service_type = SERVICE_TYPE, - "[Data Service] Requested data is ahead of cache. Sleeping for {} ms.", - AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS, - ); - ahead_of_cache_data_handling().await; - // Retry after a short sleep. - return DataFetchSubTaskResult::NoResults; - }, - Err(e) => { - ERROR_COUNT.with_label_values(&["data_fetch_failed"]).inc(); - data_fetch_error_handling(e, start_version, chain_id).await; - // Retry after a short sleep. 
- return DataFetchSubTaskResult::NoResults; - }, - }; - DataFetchSubTaskResult::Success(transaction_data) -} - -// This is a task spawned off for servicing a users' request -async fn data_fetcher_task( - redis_client: Arc, - file_store_operator: Arc>, - cache_storage_format: StorageFormat, - request_metadata: Arc, - transactions_count: Option, - tx: tokio::sync::mpsc::Sender>, - txns_to_strip_filter: BooleanTransactionFilter, - mut current_version: u64, - in_memory_cache: Arc, -) { - let mut connection_start_time = Some(std::time::Instant::now()); - let mut transactions_count = transactions_count; - - // Establish redis connection - let conn = match redis_client.get_tokio_connection_manager().await { - Ok(conn) => conn, - Err(e) => { - ERROR_COUNT - .with_label_values(&["redis_connection_failed"]) - .inc(); - // Connection will be dropped anyway, so we ignore the error here. - let _result = tx - .send_timeout( - Err(Status::unavailable( - "[Data Service] Cannot connect to Redis; please retry.", - )), - RESPONSE_CHANNEL_SEND_TIMEOUT, - ) - .await; - error!( - error = e.to_string(), - "[Data Service] Failed to get redis connection." - ); - return; - }, - }; - let mut cache_operator = CacheOperator::new(conn, cache_storage_format); - - // Validate chain id - let mut metadata = file_store_operator.get_file_store_metadata().await; - while metadata.is_none() { - metadata = file_store_operator.get_file_store_metadata().await; - tracing::warn!( - "[File worker] File store metadata not found. Waiting for {} ms.", - FILE_STORE_METADATA_WAIT_MS - ); - tokio::time::sleep(std::time::Duration::from_millis( - FILE_STORE_METADATA_WAIT_MS, - )) - .await; - } - let metadata_chain_id = metadata.unwrap().chain_id; - - // Validate redis chain id. Must be present by the time it gets here - let chain_id = match cache_operator.get_chain_id().await { - Ok(chain_id) => chain_id.unwrap(), - Err(e) => { - ERROR_COUNT - .with_label_values(&["redis_get_chain_id_failed"]) - .inc(); - // Connection will be dropped anyway, so we ignore the error here. - let _result = tx - .send_timeout( - Err(Status::unavailable( - "[Data Service] Cannot get the chain id from redis; please retry.", - )), - RESPONSE_CHANNEL_SEND_TIMEOUT, - ) - .await; - error!( - error = e.to_string(), - "[Data Service] Failed to get chain id from redis." - ); - return; - }, - }; - - if metadata_chain_id != chain_id { - let _result = tx - .send_timeout( - Err(Status::unavailable("[Data Service] Chain ID mismatch.")), - RESPONSE_CHANNEL_SEND_TIMEOUT, - ) - .await; - error!("[Data Service] Chain ID mismatch.",); - return; - } - - // Data service metrics. - let mut tps_calculator = MovingAverage::new(MOVING_AVERAGE_WINDOW_SIZE); - - loop { - // 1. Fetch data from cache and file store. - let transaction_data = match get_data_with_tasks( - current_version, - transactions_count, - chain_id, - &mut cache_operator, - file_store_operator.clone(), - request_metadata.clone(), - cache_storage_format, - in_memory_cache.clone(), - ) - .await - { - DataFetchSubTaskResult::BatchSuccess(txns) => txns, - DataFetchSubTaskResult::Success(_) => { - unreachable!("Fetching from multiple tasks will never return a single vector") - }, - DataFetchSubTaskResult::NoResults => continue, - }; - - let mut transaction_data = ensure_sequential_transactions(transaction_data); - - // TODO: Unify the truncation logic for start and end. - if let Some(count) = transactions_count { - if count == 0 { - // End the data stream. 
- // Since the client receives all the data it requested, we don't count it as a short connection. - connection_start_time = None; - break; - } else if (count as usize) < transaction_data.len() { - // Trim the data to the requested end version. - transaction_data.truncate(count as usize); - transactions_count = Some(0); + historical_data_service + .get_transactions(Request::new(request.clone())) + .await } else { - transactions_count = Some(count - transaction_data.len() as u64); + live_data_service.get_transactions(req).await } - }; - // Note: this is the protobuf encoded transaction size. - let bytes_ready_to_transfer = transaction_data - .iter() - .map(|t| t.encoded_len()) - .sum::(); - BYTES_READY_TO_TRANSFER_FROM_SERVER - .with_label_values(&request_metadata.get_label_values()) - .inc_by(bytes_ready_to_transfer as u64); - // 2. Push the data to the response channel, i.e. stream the data to the client. - let current_batch_size = transaction_data.as_slice().len(); - let end_of_batch_version = transaction_data.as_slice().last().unwrap().version; - let (resp_items, num_stripped) = get_transactions_responses_builder( - transaction_data, - chain_id as u32, - &txns_to_strip_filter, - ); - NUM_TRANSACTIONS_STRIPPED - .with_label_values(&request_metadata.get_label_values()) - .inc_by(num_stripped as u64); - let bytes_ready_to_transfer_after_stripping = resp_items - .iter() - .flat_map(|response| &response.transactions) - .map(|t| t.encoded_len()) - .sum::(); - BYTES_READY_TO_TRANSFER_FROM_SERVER_AFTER_STRIPPING - .with_label_values(&request_metadata.get_label_values()) - .inc_by(bytes_ready_to_transfer_after_stripping as u64); - let data_latency_in_secs = resp_items - .last() - .unwrap() - .transactions - .last() - .unwrap() - .timestamp - .as_ref() - .map(time_diff_since_pb_timestamp_in_secs); - - match channel_send_multiple_with_timeout(resp_items, tx.clone(), request_metadata.clone()) - .await - { - Ok(_) => { - // TODO: Reasses whether this metric is useful. - LATEST_PROCESSED_VERSION_PER_PROCESSOR - .with_label_values(&request_metadata.get_label_values()) - .set(end_of_batch_version as i64); - PROCESSED_VERSIONS_COUNT_PER_PROCESSOR - .with_label_values(&request_metadata.get_label_values()) - .inc_by(current_batch_size as u64); - if let Some(data_latency_in_secs) = data_latency_in_secs { - PROCESSED_LATENCY_IN_SECS_PER_PROCESSOR - .with_label_values(&request_metadata.get_label_values()) - .set(data_latency_in_secs); - } - }, - Err(SendTimeoutError::Timeout(_)) => { - warn!("[Data Service] Receiver is full; exiting."); - break; - }, - Err(SendTimeoutError::Closed(_)) => { - warn!("[Data Service] Receiver is closed; exiting."); - break; - }, + } else if let Some(historical_data_service) = self.historical_data_service.as_ref() { + historical_data_service.get_transactions(req).await + } else { + unreachable!("Must have at least one of the data services enabled."); } - // 3. Update the current version and record current tps. - tps_calculator.tick_now(current_batch_size as u64); - current_version = end_of_batch_version + 1; } - info!( - request_identifier = request_metadata.request_identifier.as_str(), - processor_name = request_metadata.processor_name.as_str(), - connection_id = request_metadata.request_connection_id.as_str(), - service_type = SERVICE_TYPE, - "[Data Service] Client disconnected." 
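// [Editor's illustration, not part of the patch] The count bookkeeping used by
// the removed data_fetcher_task above, reduced to plain numbers: `remaining`
// mirrors `transactions_count`, a batch that overshoots is truncated to the
// requested end, and a remaining count of zero ends the stream.
fn apply_count_limit(batch: &mut Vec<u64>, remaining: &mut Option<u64>) -> bool {
    match remaining {
        None => false,   // no limit requested; stream forever
        Some(0) => true, // everything requested was served; end the stream
        Some(n) => {
            if (*n as usize) < batch.len() {
                batch.truncate(*n as usize); // trim to the requested end version
                *n = 0;
            } else {
                *n -= batch.len() as u64;
            }
            false
        },
    }
}

fn main() {
    let mut remaining = Some(5);
    let mut batch: Vec<u64> = (1..=4).collect();
    assert!(!apply_count_limit(&mut batch, &mut remaining)); // 4 served, 1 left
    let mut batch: Vec<u64> = (5..=8).collect();
    assert!(!apply_count_limit(&mut batch, &mut remaining)); // truncated to [5]
    assert_eq!(batch, vec![5]);
    assert!(apply_count_limit(&mut Vec::new(), &mut remaining)); // done
}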
- ); - if let Some(start_time) = connection_start_time { - if start_time.elapsed().as_secs() < SHORT_CONNECTION_DURATION_IN_SECS { - SHORT_CONNECTION_COUNT - .with_label_values(&request_metadata.get_label_values()) - .inc(); - } - } -} -/// Takes in multiple batches of transactions, and: -/// 1. De-dupes in the case of overlap (but log to prom metric) -/// 2. Panics in cases of gaps -fn ensure_sequential_transactions(mut batches: Vec>) -> Vec { - // If there's only one, no sorting required - if batches.len() == 1 { - return batches.pop().unwrap(); - } - - // Sort by the first version per batch, ascending - batches.sort_by(|a, b| a.first().unwrap().version.cmp(&b.first().unwrap().version)); - let first_version = batches.first().unwrap().first().unwrap().version; - let last_version = batches.last().unwrap().last().unwrap().version; - let mut transactions: Vec = vec![]; - - let mut prev_start = None; - let mut prev_end = None; - for mut batch in batches { - let mut start_version = batch.first().unwrap().version; - let end_version = batch.last().unwrap().version; - if prev_start.is_some() { - let prev_start = prev_start.unwrap(); - let prev_end = prev_end.unwrap(); - // If this batch is fully contained within the previous batch, skip it - if prev_start <= start_version && prev_end >= end_version { - NUM_MULTI_FETCH_OVERLAPPED_VERSIONS - .with_label_values(&[SERVICE_TYPE, "full"]) - .inc_by(end_version - start_version); - continue; - } - // If this batch overlaps with the previous batch, combine them - if prev_end >= start_version { - NUM_MULTI_FETCH_OVERLAPPED_VERSIONS - .with_label_values(&[SERVICE_TYPE, "partial"]) - .inc_by(prev_end - start_version + 1); - tracing::debug!( - batch_first_version = first_version, - batch_last_version = last_version, - start_version = start_version, - end_version = end_version, - prev_start = ?prev_start, - prev_end = prev_end, - "[Filestore] Overlapping version data" - ); - batch.drain(0..(prev_end - start_version + 1) as usize); - start_version = batch.first().unwrap().version; - } - - // Otherwise there is a gap - if prev_end + 1 != start_version { - NUM_MULTI_FETCH_OVERLAPPED_VERSIONS - .with_label_values(&[SERVICE_TYPE, "gap"]) - .inc_by(prev_end - start_version + 1); - - tracing::error!( - batch_first_version = first_version, - batch_last_version = last_version, - start_version = start_version, - end_version = end_version, - prev_start = ?prev_start, - prev_end = prev_end, - "[Filestore] Gaps or dupes in processing version data" - ); - panic!("[Filestore] Gaps in processing data batch_first_version: {}, batch_last_version: {}, start_version: {}, end_version: {}, prev_start: {:?}, prev_end: {:?}", - first_version, - last_version, - start_version, - end_version, - prev_start, - prev_end, - ); - } + async fn ping( + &self, + req: Request, + ) -> Result, Status> { + if let Some(live_data_service) = self.live_data_service.as_ref() { + live_data_service.ping(req).await + } else if let Some(historical_data_service) = self.historical_data_service.as_ref() { + historical_data_service.ping(req).await + } else { + unreachable!("Must have at least one of the data services enabled."); } - - prev_start = Some(start_version); - prev_end = Some(end_version); - transactions.extend(batch); } - - transactions } -/// Builds the response for the get transactions request. Partial batch is ok, i.e., a -/// batch with transactions < 1000. -/// -/// It also returns the number of txns that were stripped. 
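// [Editor's illustration, not part of the patch] The removed
// ensure_sequential_transactions above, restated over bare version numbers:
// batches are sorted by first version, fully-contained batches are dropped,
// partial overlaps are trimmed, and any remaining gap is a hard error.
// Batches are assumed non-empty, as in the original.
fn merge_sequential(mut batches: Vec<Vec<u64>>) -> Vec<u64> {
    batches.sort_by_key(|b| b[0]);
    let mut merged: Vec<u64> = Vec::new();
    for mut batch in batches {
        if let Some(&prev_end) = merged.last() {
            if *batch.last().unwrap() <= prev_end {
                continue; // fully contained in data we already emitted
            }
            let start = batch[0];
            if start <= prev_end {
                // Partial overlap: drop the duplicated prefix.
                batch.drain(0..(prev_end - start + 1) as usize);
            } else if start != prev_end + 1 {
                panic!("gap between versions {prev_end} and {start}");
            }
        }
        merged.extend(batch);
    }
    merged
}

fn main() {
    let merged =
        merge_sequential(vec![(5..10).collect(), (1..5).collect(), (8..12).collect()]);
    assert_eq!(merged, (1..12).collect::<Vec<u64>>());
}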
-fn get_transactions_responses_builder( - transactions: Vec, - chain_id: u32, - txns_to_strip_filter: &BooleanTransactionFilter, -) -> (Vec, usize) { - let (stripped_transactions, num_stripped) = - strip_transactions(transactions, txns_to_strip_filter); - let chunks = chunk_transactions(stripped_transactions, MESSAGE_SIZE_LIMIT); - let responses = chunks - .into_iter() - .map(|chunk| TransactionsResponse { - chain_id: Some(chain_id as u64), - transactions: chunk, - }) - .collect(); - (responses, num_stripped) -} - -// This is a CPU bound operation, so we spawn_blocking -async fn deserialize_cached_transactions( - transactions: Vec>, - storage_format: StorageFormat, -) -> anyhow::Result> { - let task = tokio::task::spawn_blocking(move || { - transactions - .into_iter() - .map(|transaction| { - let cache_entry = CacheEntry::new(transaction, storage_format); - cache_entry.into_transaction() - }) - .collect::>() - }) - .await; - task.context("Transaction bytes to CacheEntry deserialization task failed") -} - -/// Fetches data from cache or the file store. It returns the data if it is ready in the cache or file store. -/// Otherwise, it returns the status of the data fetching. -async fn data_fetch( - starting_version: u64, - cache_operator: &mut CacheOperator, - file_store_operator: Arc>, - request_metadata: Arc, - storage_format: StorageFormat, -) -> anyhow::Result { - let current_batch_start_time = std::time::Instant::now(); - let batch_get_result = cache_operator - .batch_get_encoded_proto_data(starting_version) - .await; - - match batch_get_result { - // Data is not ready yet in the cache. - Ok(CacheBatchGetStatus::NotReady) => Ok(TransactionsDataStatus::AheadOfCache), - Ok(CacheBatchGetStatus::Ok(transactions)) => { - let decoding_start_time = std::time::Instant::now(); - let size_in_bytes = transactions - .iter() - .map(|transaction| transaction.len()) - .sum::(); - let num_of_transactions = transactions.len(); - let duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(); - - let transactions = - deserialize_cached_transactions(transactions, storage_format).await?; - let start_version_timestamp = transactions.first().unwrap().timestamp.as_ref(); - let end_version_timestamp = transactions.last().unwrap().timestamp.as_ref(); - - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceDataFetchedCache, - Some(starting_version as i64), - Some(starting_version as i64 + num_of_transactions as i64 - 1), - start_version_timestamp, - end_version_timestamp, - Some(duration_in_secs), - Some(size_in_bytes), - Some(num_of_transactions as i64), - Some(&request_metadata), - ); - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceTxnsDecoded, - Some(starting_version as i64), - Some(starting_version as i64 + num_of_transactions as i64 - 1), - start_version_timestamp, - end_version_timestamp, - Some(decoding_start_time.elapsed().as_secs_f64()), - Some(size_in_bytes), - Some(num_of_transactions as i64), - Some(&request_metadata), - ); - - Ok(TransactionsDataStatus::Success(transactions)) - }, - Ok(CacheBatchGetStatus::EvictedFromCache) => { - let transactions = - data_fetch_from_filestore(starting_version, file_store_operator, request_metadata) - .await?; - Ok(TransactionsDataStatus::Success(transactions)) - }, - Err(e) => Err(e), - } -} - -async fn data_fetch_from_filestore( - starting_version: u64, - file_store_operator: Arc>, - request_metadata: Arc, -) -> anyhow::Result> { - // Data is evicted from the cache. Fetch from file store. 
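// [Editor's illustration, not part of the patch] The spawn_blocking pattern
// used by the removed deserialize_cached_transactions above: protobuf decoding
// is CPU-bound, so it runs on tokio's blocking pool instead of stalling the
// async executor. `decode` is a stand-in for CacheEntry::into_transaction.
use anyhow::Context;

fn decode(blob: Vec<u8>) -> usize {
    blob.len() // stand-in for the real protobuf deserialization
}

async fn decode_all(blobs: Vec<Vec<u8>>) -> anyhow::Result<Vec<usize>> {
    tokio::task::spawn_blocking(move || blobs.into_iter().map(decode).collect())
        .await
        .context("decoding task panicked or was cancelled")
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let sizes = decode_all(vec![vec![0u8; 3], vec![0u8; 5]]).await?;
    assert_eq!(sizes, vec![3, 5]);
    Ok(())
}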
- let (transactions, io_duration, decoding_duration) = file_store_operator - .get_transactions_with_durations(starting_version, NUM_DATA_FETCH_RETRIES) - .await?; - let size_in_bytes = transactions - .iter() - .map(|transaction| transaction.encoded_len()) - .sum::(); - let num_of_transactions = transactions.len(); - let start_version_timestamp = transactions.first().unwrap().timestamp.as_ref(); - let end_version_timestamp = transactions.last().unwrap().timestamp.as_ref(); - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceDataFetchedFilestore, - Some(starting_version as i64), - Some(starting_version as i64 + num_of_transactions as i64 - 1), - start_version_timestamp, - end_version_timestamp, - Some(io_duration), - Some(size_in_bytes), - Some(num_of_transactions as i64), - Some(&request_metadata), - ); - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceTxnsDecoded, - Some(starting_version as i64), - Some(starting_version as i64 + num_of_transactions as i64 - 1), - start_version_timestamp, - end_version_timestamp, - Some(decoding_duration), - Some(size_in_bytes), - Some(num_of_transactions as i64), - Some(&request_metadata), - ); - Ok(transactions) -} - -/// Handles the case when the data is not ready in the cache, i.e., beyond the current head. -async fn ahead_of_cache_data_handling() { - // TODO: add exponential backoff. - tokio::time::sleep(Duration::from_millis( - AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS, - )) - .await; -} - -/// Handles data fetch errors, including cache and file store related errors. -async fn data_fetch_error_handling(err: anyhow::Error, current_version: u64, chain_id: u64) { - error!( - chain_id = chain_id, - current_version = current_version, - "[Data Service] Failed to fetch data from cache and file store. {:?}", - err - ); - tokio::time::sleep(Duration::from_millis( - TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS, - )) - .await; -} - -/// Gets the request metadata. Useful for logging. -fn get_request_metadata( - req: &Request, -) -> tonic::Result { - let request_metadata_pairs = vec![ - ( - "request_identifier_type", - REQUEST_HEADER_APTOS_IDENTIFIER_TYPE, - ), - ("request_identifier", REQUEST_HEADER_APTOS_IDENTIFIER), - ("request_email", REQUEST_HEADER_APTOS_EMAIL), - ( - "request_application_name", - REQUEST_HEADER_APTOS_APPLICATION_NAME, - ), - ("request_token", GRPC_AUTH_TOKEN_HEADER), - ("processor_name", GRPC_REQUEST_NAME_HEADER), - ]; - let mut request_metadata_map: HashMap = request_metadata_pairs - .into_iter() - .map(|(key, value)| { - ( - key.to_string(), - req.metadata() - .get(value) - .map(|value| value.to_str().unwrap_or("unspecified").to_string()) - .unwrap_or("unspecified".to_string()), - ) - }) - .collect(); - request_metadata_map.insert( - "request_connection_id".to_string(), - Uuid::new_v4().to_string(), - ); - let request_metadata: IndexerGrpcRequestMetadata = - serde_json::from_str(&serde_json::to_string(&request_metadata_map).unwrap()).unwrap(); - // TODO: update the request name if these are internal requests. 
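// [Editor's illustration, not part of the patch] The header-to-map extraction
// used by the removed get_request_metadata above: each expected gRPC metadata
// key is read defensively, defaulting to "unspecified" when missing or not
// valid ASCII. The header names below are placeholders, not the real ones.
use std::collections::HashMap;
use tonic::Request;

fn extract_request_metadata<T>(req: &Request<T>) -> HashMap<String, String> {
    ["x-example-identifier", "x-example-request-name"]
        .into_iter()
        .map(|key| {
            let value = req
                .metadata()
                .get(key)
                .and_then(|v| v.to_str().ok())
                .unwrap_or("unspecified")
                .to_string();
            (key.to_string(), value)
        })
        .collect()
}

fn main() {
    let req = Request::new(());
    let map = extract_request_metadata(&req);
    assert_eq!(map["x-example-identifier"], "unspecified");
}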
- Ok(request_metadata) -} - -async fn channel_send_multiple_with_timeout( - resp_items: Vec, - tx: tokio::sync::mpsc::Sender>, - request_metadata: Arc, -) -> Result<(), SendTimeoutError>> { - let overall_send_start_time = Instant::now(); - let overall_size_in_bytes = resp_items - .iter() - .map(|resp_item| resp_item.encoded_len()) - .sum::(); - let overall_start_txn = resp_items.first().unwrap().transactions.first().unwrap(); - let overall_end_txn = resp_items.last().unwrap().transactions.last().unwrap(); - let overall_start_version = overall_start_txn.version; - let overall_end_version = overall_end_txn.version; - let overall_start_txn_timestamp = overall_start_txn.clone().timestamp; - let overall_end_txn_timestamp = overall_end_txn.clone().timestamp; - - for resp_item in resp_items { - let send_start_time = Instant::now(); - let response_size = resp_item.encoded_len(); - let num_of_transactions = resp_item.transactions.len(); - let start_version = resp_item.transactions.first().unwrap().version; - let end_version = resp_item.transactions.last().unwrap().version; - let start_version_txn_timestamp = resp_item - .transactions - .first() - .unwrap() - .timestamp - .as_ref() - .unwrap(); - let end_version_txn_timestamp = resp_item - .transactions - .last() - .unwrap() - .timestamp - .as_ref() - .unwrap(); - - tx.send_timeout( - Result::::Ok(resp_item.clone()), - RESPONSE_CHANNEL_SEND_TIMEOUT, - ) - .await?; +#[tonic::async_trait] +impl RawData for DataServiceWrapperWrapper { + type GetTransactionsStream = ResponseStream; - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceChunkSent, - Some(start_version as i64), - Some(end_version as i64), - Some(start_version_txn_timestamp), - Some(end_version_txn_timestamp), - Some(send_start_time.elapsed().as_secs_f64()), - Some(response_size), - Some(num_of_transactions as i64), - Some(&request_metadata), - ); + async fn get_transactions( + &self, + req: Request, + ) -> Result, Status> { + DataService::get_transactions(self, req).await } - - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::DataServiceAllChunksSent, - Some(overall_start_version as i64), - Some(overall_end_version as i64), - overall_start_txn_timestamp.as_ref(), - overall_end_txn_timestamp.as_ref(), - Some(overall_send_start_time.elapsed().as_secs_f64()), - Some(overall_size_in_bytes), - Some((overall_end_version - overall_start_version + 1) as i64), - Some(&request_metadata), - ); - - Ok(()) } -/// This function strips transactions that match the given filter. Stripping means we -/// remove the payload, signature, events, and writesets. Note, the filter can be -/// composed of many conditions, see `BooleanTransactionFilter` for more. -/// -/// This returns the mutated txns and the number of txns that were stripped. -fn strip_transactions( - transactions: Vec, - txns_to_strip_filter: &BooleanTransactionFilter, -) -> (Vec, usize) { - let mut stripped_count = 0; - - let stripped_transactions: Vec = transactions - .into_iter() - .map(|mut txn| { - // Note: `is_allowed` means the txn matches the filter, in which case - // we strip it. - if txns_to_strip_filter.is_allowed(&txn) { - stripped_count += 1; - if let Some(info) = txn.info.as_mut() { - info.changes = vec![]; - } - if let Some(TxnData::User(user_transaction)) = txn.txn_data.as_mut() { - user_transaction.events = vec![]; - if let Some(utr) = user_transaction.request.as_mut() { - // Wipe the payload and signature. 
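// [Editor's illustration, not part of the patch] The bounded-channel
// backpressure used by the removed channel_send_multiple_with_timeout above:
// each response chunk is pushed with a deadline, and a timeout means the
// client stopped reading, so the server drops the stream. Strings stand in
// for TransactionsResponse values.
use std::time::Duration;
use tokio::sync::mpsc::{channel, error::SendTimeoutError};

async fn push_chunks(chunks: Vec<String>) -> Result<(), SendTimeoutError<String>> {
    let (tx, mut rx) = channel(2); // small buffer forces backpressure
    let consumer = tokio::spawn(async move { while rx.recv().await.is_some() {} });
    for chunk in chunks {
        // Give the receiver a bounded window to drain the channel.
        tx.send_timeout(chunk, Duration::from_secs(1)).await?;
    }
    drop(tx);
    let _ = consumer.await;
    Ok(())
}

#[tokio::main]
async fn main() {
    push_chunks(vec!["a".into(), "b".into(), "c".into()])
        .await
        .unwrap();
}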
- utr.payload = None; - utr.signature = None; - } - } - } - txn - }) - .collect(); - - (stripped_transactions, stripped_count) +pub struct DataServiceWrapper { + connection_manager: Arc, + handler_tx: Sender<( + Request, + Sender>, + )>, + pub data_service_response_channel_size: usize, } -#[cfg(test)] -mod tests { - use super::*; - use aptos_protos::transaction::v1::{ - transaction::TxnData, transaction_payload::Payload, EntryFunctionId, EntryFunctionPayload, - Event, MoveModuleId, Signature, Transaction, TransactionInfo, TransactionPayload, - UserTransaction, UserTransactionRequest, WriteSetChange, - }; - use aptos_transaction_filter::{ - boolean_transaction_filter::APIFilter, filters::UserTransactionFilterBuilder, - EntryFunctionFilterBuilder, UserTransactionPayloadFilterBuilder, - }; - - fn create_test_transaction( - module_address: String, - module_name: String, - function_name: String, - ) -> Transaction { - Transaction { - version: 1, - txn_data: Some(TxnData::User(UserTransaction { - request: Some(UserTransactionRequest { - payload: Some(TransactionPayload { - r#type: 1, - payload: Some(Payload::EntryFunctionPayload(EntryFunctionPayload { - function: Some(EntryFunctionId { - module: Some(MoveModuleId { - address: module_address, - name: module_name, - }), - name: function_name, - }), - ..Default::default() - })), - }), - signature: Some(Signature::default()), - ..Default::default() - }), - events: vec![Event::default()], - })), - info: Some(TransactionInfo { - changes: vec![WriteSetChange::default()], - ..Default::default() - }), - ..Default::default() +impl DataServiceWrapper { + pub fn new( + connection_manager: Arc, + handler_tx: Sender<( + Request, + Sender>, + )>, + data_service_response_channel_size: usize, + ) -> Self { + Self { + connection_manager, + handler_tx, + data_service_response_channel_size, } } +} - #[test] - fn test_ensure_sequential_transactions_merges_and_sorts() { - let transactions1 = (1..5) - .map(|i| Transaction { - version: i, - ..Default::default() - }) - .collect(); - let transactions2 = (5..10) - .map(|i| Transaction { - version: i, - ..Default::default() - }) - .collect(); - // No overlap, just normal fetching flow - let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); - assert_eq!(transactions1.len(), 9); - assert_eq!(transactions1.first().unwrap().version, 1); - assert_eq!(transactions1.last().unwrap().version, 9); - - // This is a full overlap - let transactions2 = (5..7) - .map(|i| Transaction { - version: i, - ..Default::default() - }) - .collect(); - let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); - assert_eq!(transactions1.len(), 9); - assert_eq!(transactions1.first().unwrap().version, 1); - assert_eq!(transactions1.last().unwrap().version, 9); - - // Partial overlap - let transactions2 = (5..12) - .map(|i| Transaction { - version: i, - ..Default::default() - }) - .collect(); - let transactions1 = ensure_sequential_transactions(vec![transactions1, transactions2]); - assert_eq!(transactions1.len(), 11); - assert_eq!(transactions1.first().unwrap().version, 1); - assert_eq!(transactions1.last().unwrap().version, 11); - } - - const MODULE_ADDRESS: &str = "0x1234"; - const MODULE_NAME: &str = "module"; - const FUNCTION_NAME: &str = "function"; - - #[test] - fn test_transactions_are_stripped_correctly_sender_addresses() { - let sender_address = "0x1234".to_string(); - // Create a transaction with a user transaction - let txn = Transaction { - version: 1, - txn_data: 
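// [Editor's illustration, not part of the patch] The stripping rule encoded by
// the removed strip_transactions above, on a simplified transaction type: a
// matching transaction keeps its identity but loses its payload, signature,
// events, and write-set changes, and the caller learns how many were stripped.
#[derive(Default)]
struct Txn {
    sender: String,
    payload: Option<String>,
    signature: Option<String>,
    events: Vec<String>,
    changes: Vec<String>,
}

fn strip_matching(txns: Vec<Txn>, is_match: impl Fn(&Txn) -> bool) -> (Vec<Txn>, usize) {
    let mut stripped = 0;
    let txns = txns
        .into_iter()
        .map(|mut t| {
            if is_match(&t) {
                stripped += 1;
                t.payload = None;
                t.signature = None;
                t.events.clear();
                t.changes.clear();
            }
            t
        })
        .collect();
    (txns, stripped)
}

fn main() {
    let txn = Txn {
        sender: "0x1234".into(),
        payload: Some("p".into()),
        ..Default::default()
    };
    let (out, n) = strip_matching(vec![txn], |t| t.sender == "0x1234");
    assert_eq!(n, 1);
    assert!(out[0].payload.is_none());
}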
Some(TxnData::User(UserTransaction { - request: Some(UserTransactionRequest { - sender: sender_address.clone(), - payload: Some(TransactionPayload::default()), - signature: Some(Signature::default()), - ..Default::default() - }), - events: vec![Event::default()], - })), - info: Some(TransactionInfo { - changes: vec![WriteSetChange::default()], - ..Default::default() - }), - ..Default::default() - }; - - // Create filter for senders to ignore. - let sender_filters = vec![sender_address] - .into_iter() - .map(|address| { - BooleanTransactionFilter::from(APIFilter::UserTransactionFilter( - UserTransactionFilterBuilder::default() - .sender(address) - .build() - .unwrap(), - )) - }) - .collect(); - let filter = BooleanTransactionFilter::new_or(sender_filters); +#[tonic::async_trait] +impl DataService for DataServiceWrapper { + type GetTransactionsStream = ResponseStream; - let (filtered_txns, num_stripped) = strip_transactions(vec![txn], &filter); - assert_eq!(num_stripped, 1); - assert_eq!(filtered_txns.len(), 1); - let txn = filtered_txns.first().unwrap(); - let user_transaction = match &txn.txn_data { - Some(TxnData::User(user_transaction)) => user_transaction, - _ => panic!("Expected user transaction"), - }; - assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); - assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); - assert_eq!(user_transaction.events.len(), 0); - assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); - } + async fn get_transactions( + &self, + req: Request, + ) -> Result, Status> { + let (tx, rx) = channel(self.data_service_response_channel_size); + self.handler_tx.send((req, tx)).await.unwrap(); - #[test] - fn test_transactions_are_stripped_correctly_module_address() { - let txn = create_test_transaction( - MODULE_ADDRESS.to_string(), - MODULE_NAME.to_string(), - FUNCTION_NAME.to_string(), - ); - // Testing filter with only address set - let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( - APIFilter::UserTransactionFilter( - UserTransactionFilterBuilder::default() - .payload( - UserTransactionPayloadFilterBuilder::default() - .function( - EntryFunctionFilterBuilder::default() - .address(MODULE_ADDRESS.to_string()) - .build() - .unwrap(), - ) - .build() - .unwrap(), - ) - .build() - .unwrap(), - ), - )]); + let output_stream = ReceiverStream::new(rx); + let response = Response::new(Box::pin(output_stream) as Self::GetTransactionsStream); - let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); - assert_eq!(num_stripped, 1); - assert_eq!(filtered_txns.len(), 1); - let txn = filtered_txns.first().unwrap(); - let user_transaction = match &txn.txn_data { - Some(TxnData::User(user_transaction)) => user_transaction, - _ => panic!("Expected user transaction"), - }; - assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); - assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); - assert_eq!(user_transaction.events.len(), 0); - assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); + Ok(response) } - #[test] - fn test_transactions_are_stripped_correctly_module_name() { - let txn = create_test_transaction( - MODULE_ADDRESS.to_string(), - MODULE_NAME.to_string(), - FUNCTION_NAME.to_string(), - ); - // Testing filter with only module set - let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( - APIFilter::UserTransactionFilter( - UserTransactionFilterBuilder::default() - .payload( - 
UserTransactionPayloadFilterBuilder::default() - .function( - EntryFunctionFilterBuilder::default() - .module(MODULE_NAME.to_string()) - .build() - .unwrap(), - ) - .build() - .unwrap(), - ) - .build() - .unwrap(), - ), - )]); - - let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); - assert_eq!(num_stripped, 1); - assert_eq!(filtered_txns.len(), 1); - let txn = filtered_txns.first().unwrap(); - let user_transaction = match &txn.txn_data { - Some(TxnData::User(user_transaction)) => user_transaction, - _ => panic!("Expected user transaction"), + async fn ping( + &self, + req: Request, + ) -> Result, Status> { + let request = req.into_inner(); + let known_latest_version = request.known_latest_version(); + self.connection_manager + .update_known_latest_version(known_latest_version); + let stream_info = StreamInfo { + active_streams: self.connection_manager.get_active_streams(), }; - assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); - assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); - assert_eq!(user_transaction.events.len(), 0); - assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); - } - - #[test] - fn test_transactions_are_stripped_correctly_function_name() { - let txn = create_test_transaction( - MODULE_ADDRESS.to_string(), - MODULE_NAME.to_string(), - FUNCTION_NAME.to_string(), - ); - // Testing filter with only function set - let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( - APIFilter::UserTransactionFilter( - UserTransactionFilterBuilder::default() - .payload( - UserTransactionPayloadFilterBuilder::default() - .function( - EntryFunctionFilterBuilder::default() - .function(FUNCTION_NAME.to_string()) - .build() - .unwrap(), - ) - .build() - .unwrap(), - ) - .build() - .unwrap(), - ), - )]); - - let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); - assert_eq!(num_stripped, 1); - assert_eq!(filtered_txns.len(), 1); - let txn = filtered_txns.first().unwrap(); - let user_transaction = match &txn.txn_data { - Some(TxnData::User(user_transaction)) => user_transaction, - _ => panic!("Expected user transaction"), + let info = DataServiceInfo { + timestamp: Some(timestamp_now_proto()), + known_latest_version: Some(known_latest_version), + stream_info: Some(stream_info), }; - assert_eq!(user_transaction.request.as_ref().unwrap().payload, None); - assert_eq!(user_transaction.request.as_ref().unwrap().signature, None); - assert_eq!(user_transaction.events.len(), 0); - assert_eq!(txn.info.as_ref().unwrap().changes.len(), 0); - } - #[test] - fn test_transactions_are_not_stripped() { - let txn = create_test_transaction( - MODULE_ADDRESS.to_string(), - MODULE_NAME.to_string(), - FUNCTION_NAME.to_string(), - ); - // Testing filter with wrong filter - let filter = BooleanTransactionFilter::new_or(vec![BooleanTransactionFilter::from( - APIFilter::UserTransactionFilter( - UserTransactionFilterBuilder::default() - .payload( - UserTransactionPayloadFilterBuilder::default() - .function( - EntryFunctionFilterBuilder::default() - .function("0xrandom".to_string()) - .build() - .unwrap(), - ) - .build() - .unwrap(), - ) - .build() - .unwrap(), - ), - )]); + let response = PingDataServiceResponse { info: Some(info) }; - let (filtered_txns, num_stripped) = strip_transactions(vec![txn.clone()], &filter); - assert_eq!(num_stripped, 0); - assert_eq!(filtered_txns.len(), 1); - let txn = filtered_txns.first().unwrap(); - let user_transaction = match &txn.txn_data { - 
Some(TxnData::User(user_transaction)) => user_transaction, - _ => panic!("Expected user transaction"), - }; - assert_ne!(user_transaction.request.as_ref().unwrap().payload, None); - assert_ne!(user_transaction.request.as_ref().unwrap().signature, None); - assert_ne!(user_transaction.events.len(), 0); - assert_ne!(txn.info.as_ref().unwrap().changes.len(), 0); + Ok(Response::new(response)) } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-file-store/Cargo.toml index 57d4eed5e863b..3869b06b40c30 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-file-store/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/Cargo.toml @@ -18,14 +18,24 @@ aptos-indexer-grpc-server-framework = { workspace = true } aptos-indexer-grpc-utils = { workspace = true } aptos-metrics-core = { workspace = true } aptos-moving-average = { workspace = true } +aptos-protos = { workspace = true } async-trait = { workspace = true } +build_html = { workspace = true } clap = { workspace = true } +dashmap = { workspace = true } futures = { workspace = true } once_cell = { workspace = true } +prost = { workspace = true } +rand = { workspace = true } redis = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } tokio = { workspace = true } +tokio-scoped = { workspace = true } +tokio-stream = { workspace = true } +tonic = { workspace = true } tracing = { workspace = true } +warp = { workspace = true } [target.'cfg(unix)'.dependencies] jemallocator = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/data_manager.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/data_manager.rs new file mode 100644 index 0000000000000..608e33cb070e6 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/data_manager.rs @@ -0,0 +1,287 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::metadata_manager::MetadataManager; +use anyhow::{bail, Result}; +use aptos_indexer_grpc_utils::{ + config::IndexerGrpcFileStoreConfig, file_store_operator_v2::FileStoreOperatorV2, +}; +use aptos_protos::{ + internal::fullnode::v1::{ + transactions_from_node_response::Response, GetTransactionsFromNodeRequest, + }, + transaction::v1::Transaction, +}; +use futures::StreamExt; +use prost::Message; +use std::{ + collections::VecDeque, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; +use tokio::sync::{mpsc::channel, RwLock}; +use tracing::{error, trace, warn}; + +const MAX_CACHE_SIZE: usize = 10 * (1 << 30); +const TARGET_CACHE_SIZE: usize = 8 * (1 << 30); + +struct Cache { + start_version: u64, + file_store_version: AtomicU64, + transactions: VecDeque, + cache_size: usize, +} + +impl Cache { + fn new(file_store_version: u64) -> Self { + Self { + start_version: file_store_version, + file_store_version: AtomicU64::new(file_store_version), + transactions: VecDeque::new(), + cache_size: 0, + } + } + + fn maybe_evict(&mut self) -> bool { + if self.cache_size <= MAX_CACHE_SIZE { + return true; + } + + while self.start_version < self.file_store_version.load(Ordering::SeqCst) + && self.cache_size > TARGET_CACHE_SIZE + { + let transaction = self.transactions.pop_front().unwrap(); + self.cache_size -= transaction.encoded_len(); + self.start_version += 1; + } + + self.cache_size <= MAX_CACHE_SIZE + } + + fn put_transactions(&mut self, transactions: Vec) { + self.cache_size += transactions + .iter() + .map(|transaction| transaction.encoded_len()) + .sum::(); + 
self.transactions.extend(transactions);
+ }
+
+ fn get_transactions(
+ &self,
+ start_version: u64,
+ max_size_bytes: usize,
+ update_file_store_version: bool,
+ ) -> Vec<Transaction> {
+ if !update_file_store_version {
+ trace!(
+ "Requesting version {start_version} from cache, update_file_store_version = {update_file_store_version}.",
+ );
+ trace!(
+ "Current data range in cache: [{}, {}).",
+ self.start_version,
+ self.start_version + self.transactions.len() as u64
+ );
+ }
+ if start_version < self.start_version {
+ return vec![];
+ }
+
+ let mut transactions = vec![];
+ let mut size_bytes = 0;
+ for transaction in self
+ .transactions
+ .iter()
+ .skip((start_version - self.start_version) as usize)
+ {
+ size_bytes += transaction.encoded_len();
+ transactions.push(transaction.clone());
+ if size_bytes > max_size_bytes {
+ // Note: We choose not to pop the last transaction here, so the size could be
+ // slightly larger than `max_size_bytes`. This is fine.
+ break;
+ }
+ }
+ if update_file_store_version {
+ self.file_store_version
+ .fetch_add(transactions.len() as u64, Ordering::SeqCst);
+ } else {
+ trace!(
+ "Returned {} transactions from Cache, total {size_bytes} bytes.",
+ transactions.len()
+ );
+ }
+ transactions
+ }
+}
+
+pub(crate) struct DataManager {
+ cache: RwLock<Cache>,
+ file_store_operator: FileStoreOperatorV2,
+ metadata_manager: Arc<MetadataManager>,
+}
+
+impl DataManager {
+ pub(crate) async fn new(
+ chain_id: u64,
+ file_store_config: IndexerGrpcFileStoreConfig,
+ file_store_version: u64,
+ metadata_manager: Arc<MetadataManager>,
+ ) -> Self {
+ let file_store = file_store_config.create_filestore().await;
+ let file_store_operator = FileStoreOperatorV2::new(chain_id, file_store, 10000);
+ Self {
+ cache: RwLock::new(Cache::new(file_store_version)),
+ file_store_operator,
+ metadata_manager,
+ }
+ }
+
+ pub(crate) async fn start(&self) {
+ 'out: loop {
+ let mut fullnode_client = self.metadata_manager.get_fullnode_for_request();
+ let cache = self.cache.read().await;
+ let request = GetTransactionsFromNodeRequest {
+ starting_version: Some(cache.start_version + cache.transactions.len() as u64),
+ transactions_count: Some(100000),
+ };
+ drop(cache);
+
+ let response = fullnode_client.get_transactions_from_node(request).await;
+ if response.is_err() {
+ warn!(
+ "Error when getting transactions from fullnode: {}",
+ response.err().unwrap()
+ );
+ tokio::time::sleep(Duration::from_millis(100)).await;
+ continue;
+ }
+
+ let mut response = response.unwrap().into_inner();
+ while let Some(response_item) = response.next().await {
+ loop {
+ if self.cache.write().await.maybe_evict() {
+ break;
+ }
+ let cache = self.cache.read().await;
+ warn!("Filestore is lagging behind, cache is full [{}, {}), known_latest_version ({}).", cache.start_version, cache.start_version + cache.transactions.len() as u64, self.metadata_manager.get_known_latest_version());
+ tokio::time::sleep(Duration::from_millis(100)).await;
+ }
+ match response_item {
+ Ok(r) => {
+ if let Some(response) = r.response {
+ match response {
+ Response::Data(data) => {
+ self.cache.write().await.put_transactions(data.transactions);
+ },
+ Response::Status(_) => continue,
+ }
+ } else {
+ warn!("Error when getting transactions from fullnode: no data.");
+ continue 'out;
+ }
+ },
+ Err(e) => {
+ warn!("Error when getting transactions from fullnode: {}", e);
+ continue 'out;
+ },
+ }
+ }
+ }
+ }
+
+ pub(crate) fn lagging(&self, cache_next_version: u64) -> bool {
+ // TODO(grao): Find a better way; we can use the information in the metadata_manager.
+ cache_next_version + 20000 < self.metadata_manager.get_known_latest_version()
+ }
+
+ pub(crate) async fn get_transactions(
+ &self,
+ start_version: u64,
+ max_size: usize,
+ ) -> Result<Vec<Transaction>> {
+ let cache = self.cache.read().await;
+ let cache_start_version = cache.start_version;
+ let cache_next_version = cache_start_version + cache.transactions.len() as u64;
+ drop(cache);
+
+ if start_version >= cache_start_version {
+ if start_version >= cache_next_version {
+ // If lagging, try to fetch the data from FN.
+ if self.lagging(cache_next_version) {
+ trace!("GrpcManager is lagging, getting data from FN, requested_version: {start_version}, cache_next_version: {cache_next_version}.");
+ let request = GetTransactionsFromNodeRequest {
+ starting_version: Some(cache_next_version),
+ transactions_count: Some(5000),
+ };
+
+ let mut fullnode_client = self.metadata_manager.get_fullnode_for_request();
+ let response = fullnode_client.get_transactions_from_node(request).await?;
+ let mut response = response.into_inner();
+ while let Some(Ok(response_item)) = response.next().await {
+ if let Some(response) = response_item.response {
+ match response {
+ Response::Data(data) => {
+ return Ok(data.transactions);
+ },
+ Response::Status(_) => continue,
+ }
+ }
+ }
+ }
+
+ // Let the client retry.
+ return Ok(vec![]);
+ }
+ // NOTE: We are not holding the read lock on the cache here, so it's possible that
+ // start_version becomes older than cache.start_version. In that case the
+ // following function returns an empty result and lets the client retry.
+ return Ok(self
+ .get_transactions_from_cache(
+ start_version,
+ max_size,
+ /*update_file_store_version=*/ false,
+ )
+ .await);
+ }
+
+ let (tx, mut rx) = channel(1);
+ self.file_store_operator
+ .get_transaction_batch(
+ start_version,
+ /*retries=*/ 3,
+ /*max_files=*/ Some(1),
+ tx,
+ )
+ .await;
+
+ if let Some(mut transactions) = rx.recv().await {
+ trace!(
+ "Transactions returned from filestore: [{start_version}, {}).",
+ transactions.last().unwrap().version
+ );
+ let first_version = transactions.first().unwrap().version;
+ Ok(transactions.split_off((start_version - first_version) as usize))
+ } else {
+ let error_msg = "Failed to fetch transactions from filestore, either filestore is not available, or data is corrupted.";
+ // TODO(grao): Consider downgrading this to warn! if this happens too frequently when
+ // filestore is unavailable.
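// [Editor's illustration, not part of the patch] The tiered read path above,
// reduced to its last step: a file-store batch begins at a file boundary at or
// before the requested version, so the returned batch is trimmed with
// split_off to start exactly at the requested version.
fn trim_to_start(mut file_batch: Vec<u64>, start_version: u64) -> Vec<u64> {
    let first_version = file_batch[0];
    // split_off keeps [0, n) in place and returns [n, len).
    file_batch.split_off((start_version - first_version) as usize)
}

fn main() {
    // A file covering versions 10000..10005, requested from 10002.
    let batch: Vec<u64> = (10000..10005).collect();
    assert_eq!(trim_to_start(batch, 10002), vec![10002, 10003, 10004]);
}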
+ error!(error_msg);
+ bail!(error_msg);
+ }
+ }
+
+ pub(crate) async fn get_transactions_from_cache(
+ &self,
+ start_version: u64,
+ max_size: usize,
+ update_file_store_version: bool,
+ ) -> Vec<Transaction> {
+ self.cache
+ .read()
+ .await
+ .get_transactions(start_version, max_size, update_file_store_version)
+ }
+}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/file_store_uploader.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/file_store_uploader.rs
new file mode 100644
index 0000000000000..ed00fea77bfe0
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/file_store_uploader.rs
@@ -0,0 +1,127 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::data_manager::DataManager;
+use anyhow::Result;
+use aptos_indexer_grpc_utils::{
+ compression_util::{FileEntry, StorageFormat},
+ config::IndexerGrpcFileStoreConfig,
+ file_store_operator_v2::{BatchMetadata, FileStoreOperatorV2},
+};
+use aptos_protos::transaction::v1::Transaction;
+use prost::Message;
+use std::{sync::Arc, time::Duration};
+use tracing::info;
+
+const NUM_TXNS_PER_FOLDER: u64 = 10000;
+const MAX_SIZE_PER_FILE: usize = 20 * (1 << 20);
+
+pub(crate) struct FileStoreUploader {
+ file_store_operator: FileStoreOperatorV2,
+ buffer: Vec<Transaction>,
+ buffer_size: usize,
+ buffer_batch_metadata: BatchMetadata,
+ version: u64,
+}
+
+impl FileStoreUploader {
+ pub(crate) async fn new(
+ chain_id: u64,
+ file_store_config: IndexerGrpcFileStoreConfig,
+ ) -> Result<Self> {
+ let file_store = file_store_config.create_filestore().await;
+ let file_store_operator =
+ FileStoreOperatorV2::new(chain_id, file_store, NUM_TXNS_PER_FOLDER);
+
+ file_store_operator.maybe_init_metadata().await?;
+
+ let version = file_store_operator
+ .get_latest_version()
+ .await
+ .expect("Latest version must exist.");
+
+ Ok(Self {
+ file_store_operator,
+ buffer: vec![],
+ buffer_size: 0,
+ buffer_batch_metadata: BatchMetadata::default(),
+ version,
+ })
+ }
+
+ pub(crate) async fn start(&mut self, data_manager: Arc<DataManager>) -> Result<()> {
+ loop {
+ let transactions = data_manager
+ .get_transactions_from_cache(
+ self.version,
+ MAX_SIZE_PER_FILE,
+ /*update_file_store_version=*/ true,
+ )
+ .await;
+ let len = transactions.len();
+ for transaction in transactions {
+ self.buffer_and_maybe_dump_transactions_to_file(transaction)
+ .await?;
+ }
+ self.version += len as u64;
+ if len == 0 {
+ tokio::time::sleep(Duration::from_millis(100)).await;
+ }
+ }
+ }
+
+ async fn buffer_and_maybe_dump_transactions_to_file(
+ &mut self,
+ transaction: Transaction,
+ ) -> Result<()> {
+ let end_batch = (transaction.version + 1) % NUM_TXNS_PER_FOLDER == 0;
+ let size = transaction.encoded_len();
+ self.buffer.push(transaction);
+ self.buffer_size += size;
+ if self.buffer_size >= MAX_SIZE_PER_FILE || end_batch {
+ self.dump_transactions_to_file(end_batch).await?;
+ }
+
+ Ok(())
+ }
+
+ async fn dump_transactions_to_file(&mut self, end_batch: bool) -> Result<()> {
+ let first_version = self.buffer.first().unwrap().version;
+ let last_version = self.buffer.last().unwrap().version;
+ // TODO(grao): This is slow; it needs to move to a different thread.
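// [Editor's illustration, not part of the patch] The flush policy above, with
// plain byte counts: transactions accumulate in a buffer that is written out
// either when it exceeds the per-file size budget or when a version crosses a
// fixed batch boundary (every 10000 versions), whichever comes first.
const MAX_BYTES_PER_FILE: usize = 20 * (1 << 20);
const VERSIONS_PER_BATCH: u64 = 10_000;

struct Uploader {
    buffer: Vec<(u64, usize)>, // (version, encoded size)
    buffer_size: usize,
}

impl Uploader {
    fn push(&mut self, version: u64, size: usize) -> bool {
        let end_batch = (version + 1) % VERSIONS_PER_BATCH == 0;
        self.buffer.push((version, size));
        self.buffer_size += size;
        if self.buffer_size >= MAX_BYTES_PER_FILE || end_batch {
            // A real implementation would compress and upload here.
            self.buffer.clear();
            self.buffer_size = 0;
            return true; // flushed
        }
        false
    }
}

fn main() {
    let mut up = Uploader { buffer: vec![], buffer_size: 0 };
    assert!(!up.push(9_998, 100)); // buffered
    assert!(up.push(9_999, 100)); // version 9999 closes the 0..10000 batch
}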
+ let data_file = FileEntry::from_transactions( + std::mem::take(&mut self.buffer), + StorageFormat::Lz4CompressedProto, + ); + let data_size = self.buffer_size; + self.buffer_size = 0; + let path = self.file_store_operator.get_path_for_version(first_version); + self.buffer_batch_metadata + .files + .push((first_version, data_size)); + info!("Dumping transactions [{first_version}, {last_version}] to file {path:?}."); + self.file_store_operator + .save_raw_file(path, data_file.into_inner()) + .await?; + if end_batch { + let batch_metadata_path = self + .file_store_operator + .get_path_for_batch_metadata(first_version); + self.file_store_operator + .save_raw_file( + batch_metadata_path, + serde_json::to_vec(&self.buffer_batch_metadata).map_err(anyhow::Error::msg)?, + ) + .await?; + self.file_store_operator + .update_file_store_metadata(last_version + 1) + .await?; + self.buffer_batch_metadata = BatchMetadata::default(); + } + Ok(()) + } + + pub(crate) fn version(&self) -> u64 { + self.version + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs index 336a002ca9f72..659f2d56d22c7 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs @@ -1,67 +1,385 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +pub mod data_manager; +pub mod file_store_uploader; +pub mod metadata_manager; pub mod metrics; -pub mod processor; +pub mod service; +use crate::{ + data_manager::DataManager, metadata_manager::MetadataManager, service::GrpcManagerService, +}; use anyhow::Result; use aptos_indexer_grpc_server_framework::RunnableConfig; -use aptos_indexer_grpc_utils::{config::IndexerGrpcFileStoreConfig, types::RedisUrl}; -use processor::Processor; +use aptos_indexer_grpc_utils::{ + config::IndexerGrpcFileStoreConfig, + status_page::{render_status_page, Tab}, +}; +use aptos_protos::indexer::v1::{ + grpc_manager_server::GrpcManagerServer, DataServiceInfo, FullnodeInfo, +}; +use build_html::{ + Container, ContainerType, HtmlContainer, HtmlElement, HtmlTag, Table, TableCell, TableCellType, + TableRow, +}; +use file_store_uploader::FileStoreUploader; +use futures::executor::block_on; +use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; +use std::{ + collections::{HashMap, VecDeque}, + net::SocketAddr, + sync::Arc, + time::Duration, +}; +use tokio::sync::Mutex; +use tonic::{codec::CompressionEncoding, transport::Server}; +use warp::{reply::Response, Rejection}; + +const HTTP2_PING_INTERVAL_DURATION: Duration = Duration::from_secs(60); +const HTTP2_PING_TIMEOUT_DURATION: Duration = Duration::from_secs(10); + +static GRPC_MANAGER: OnceCell = OnceCell::new(); + +#[derive(Clone, Debug, Deserialize, Serialize)] +struct ServiceConfig { + listen_address: SocketAddr, +} #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -pub struct IndexerGrpcFileStoreWorkerConfig { - pub file_store_config: IndexerGrpcFileStoreConfig, - pub redis_main_instance_address: RedisUrl, - pub enable_expensive_logging: Option, - pub chain_id: u64, - #[serde(default = "default_enable_cache_compression")] - pub enable_cache_compression: bool, +pub struct IndexerGrpcManagerConfig { + chain_id: u64, + service_config: ServiceConfig, + file_store_config: IndexerGrpcFileStoreConfig, + self_advertised_address: String, + grpc_manager_addresses: Vec, + fullnode_addresses: Vec, +} + +#[async_trait::async_trait] +impl RunnableConfig for 
IndexerGrpcManagerConfig { + async fn run(&self) -> Result<()> { + GRPC_MANAGER + .get_or_init(|| block_on(GrpcManager::new(self))) + .start(&self.service_config); + + Ok(()) + } + + fn get_server_name(&self) -> String { + "grpc_manager".to_string() + } + + async fn status_page(&self) -> Result { + let mut tabs = vec![]; + + if let Some(grpc_manager) = GRPC_MANAGER.get() { + let metadata_manager = grpc_manager.get_metadata_manager(); + tabs.push(render_fullnode_tab(metadata_manager.get_fullnodes_info())); + let live_data_services_info = metadata_manager.get_live_data_services_info(); + let historical_data_services_info = + metadata_manager.get_historical_data_services_info(); + tabs.push(render_data_service_tab( + "LiveDataServices", + &live_data_services_info, + )); + tabs.push(render_data_service_tab( + "HistoricalDataServices", + &historical_data_services_info, + )); + tabs.push(render_stream_tab( + &live_data_services_info, + &historical_data_services_info, + )); + } + + render_status_page(tabs) + } } -const fn default_enable_cache_compression() -> bool { - false +struct GrpcManager { + chain_id: u64, + filestore_uploader: Mutex, + metadata_manager: Arc, + data_manager: Arc, } -impl IndexerGrpcFileStoreWorkerConfig { - pub fn new( - file_store_config: IndexerGrpcFileStoreConfig, - redis_main_instance_address: RedisUrl, - enable_expensive_logging: Option, - chain_id: u64, - enable_cache_compression: bool, - ) -> Self { +impl GrpcManager { + pub(crate) async fn new(config: &IndexerGrpcManagerConfig) -> Self { + let chain_id = config.chain_id; + let filestore_uploader = Mutex::new( + FileStoreUploader::new(chain_id, config.file_store_config.clone()) + .await + .expect(&format!( + "Failed to create filestore uploader, config: {:?}.", + config.file_store_config + )), + ); + let metadata_manager = Arc::new(MetadataManager::new( + config.self_advertised_address.clone(), + config.grpc_manager_addresses.clone(), + config.fullnode_addresses.clone(), + )); + let data_manager = Arc::new( + DataManager::new( + chain_id, + config.file_store_config.clone(), + filestore_uploader.lock().await.version(), + metadata_manager.clone(), + ) + .await, + ); Self { - file_store_config, - redis_main_instance_address, - enable_expensive_logging, chain_id, - enable_cache_compression, + filestore_uploader, + metadata_manager, + data_manager, } } -} -#[async_trait::async_trait] -impl RunnableConfig for IndexerGrpcFileStoreWorkerConfig { - async fn run(&self) -> Result<()> { - let mut processor = Processor::new( - self.redis_main_instance_address.clone(), - self.file_store_config.clone(), + pub(crate) fn start(&self, service_config: &ServiceConfig) { + let service = GrpcManagerServer::new(GrpcManagerService::new( self.chain_id, - self.enable_cache_compression, - ) - .await - .expect("Failed to create file store processor"); - processor - .run() - .await - .expect("File store processor exited unexpectedly"); - Ok(()) + self.metadata_manager.clone(), + self.data_manager.clone(), + )) + .send_compressed(CompressionEncoding::Zstd) + .accept_compressed(CompressionEncoding::Zstd); + let server = Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .add_service(service); + + tokio_scoped::scope(|s| { + s.spawn(async move { + self.metadata_manager.start().await.unwrap(); + }); + s.spawn(async move { self.data_manager.start().await }); + s.spawn(async move { + self.filestore_uploader + .lock() + .await + 
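// [Editor's illustration, not part of the patch] The lazy-singleton pattern
// used for GRPC_MANAGER above: a OnceCell is initialized exactly once, with an
// async constructor driven to completion by a blocking executor so the value
// can later be shared by reference. Note that block_on from inside an async
// runtime worker thread can deadlock; here it runs on a plain thread.
use futures::executor::block_on;
use once_cell::sync::OnceCell;

static SINGLETON: OnceCell<String> = OnceCell::new();

async fn build() -> String {
    "initialized".to_string() // stand-in for the async GrpcManager::new
}

fn main() {
    let value = SINGLETON.get_or_init(|| block_on(build()));
    assert_eq!(value, "initialized");
    // Later calls return the same instance without re-running the constructor.
    assert!(SINGLETON.get().is_some());
}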
.start(self.data_manager.clone()) + .await + .unwrap(); + }); + s.spawn(async move { + server.serve(service_config.listen_address).await.unwrap(); + }); + }); } - fn get_server_name(&self) -> String { - "idxfilestore".to_string() + fn get_metadata_manager(&self) -> &MetadataManager { + &self.metadata_manager } } + +fn render_fullnode_tab(fullnodes_info: HashMap>) -> Tab { + let overview = Container::new(ContainerType::Section) + .with_paragraph_attr( + "Connected Fullnodes", + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_table( + fullnodes_info.into_iter().fold( + Table::new() + .with_attributes([("style", "width: 100%; border: 5px solid black;")]) + .with_thead_attributes([( + "style", + "background-color: lightcoral; color: white;", + )]) + .with_custom_header_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Header).with_raw("Id")) + .with_cell( + TableCell::new(TableCellType::Header) + .with_raw("Last Ping/Heartbeat Time"), + ) + .with_cell( + TableCell::new(TableCellType::Header) + .with_raw("Known Latest Version"), + ), + ), + |table, fullnode_info| { + let last_sample = fullnode_info.1.back(); + let (timestamp, known_latest_version) = if let Some(last_sample) = last_sample { + ( + format!("{:?}", last_sample.timestamp.unwrap()), + format!("{}", last_sample.known_latest_version()), + ) + } else { + ("No data point.".to_string(), "No data point.".to_string()) + }; + table.with_custom_body_row( + TableRow::new() + .with_cell( + TableCell::new(TableCellType::Data).with_raw(fullnode_info.0), + ) + .with_cell(TableCell::new(TableCellType::Data).with_raw(timestamp)) + .with_cell( + TableCell::new(TableCellType::Data).with_raw(known_latest_version), + ), + ) + }, + ), + ); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(overview) + .into(); + + Tab::new("Fullnodes", content) +} + +fn render_data_service_tab( + tab_name: &str, + data_services_info: &HashMap>, +) -> Tab { + let overview = Container::new(ContainerType::Section) + .with_paragraph_attr( + format!("Connected {tab_name}"), + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_table( + data_services_info.iter().fold( + Table::new() + .with_attributes([("style", "width: 100%; border: 5px solid black;")]) + .with_thead_attributes([( + "style", + "background-color: lightcoral; color: white;", + )]) + .with_custom_header_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Header).with_raw("Id")) + .with_cell( + TableCell::new(TableCellType::Header) + .with_raw("Last Ping/Heartbeat Time"), + ) + .with_cell( + TableCell::new(TableCellType::Header) + .with_raw("Known Latest Version"), + ) + .with_cell( + TableCell::new(TableCellType::Header) + .with_raw("# of Connected Streams"), + ), + ), + |table, data_service_info| { + let last_sample = data_service_info.1.back(); + let (timestamp, known_latest_version, num_connected_streams) = + if let Some(last_sample) = last_sample { + ( + format!("{:?}", last_sample.timestamp.unwrap()), + format!("{}", last_sample.known_latest_version()), + format!( + "{}", + last_sample + .stream_info + .as_ref() + .map(|stream_info| stream_info.active_streams.len()) + .unwrap_or(0) + ), + ) + } else { + ( + "No data point.".to_string(), + "No data point.".to_string(), + "No data point.".to_string(), + ) + }; + table.with_custom_body_row( + TableRow::new() + .with_cell( + TableCell::new(TableCellType::Data).with_raw(data_service_info.0), + ) + .with_cell(TableCell::new(TableCellType::Data).with_raw(timestamp)) + .with_cell( + 
TableCell::new(TableCellType::Data).with_raw(known_latest_version), + ) + .with_cell( + TableCell::new(TableCellType::Data).with_raw(num_connected_streams), + ), + ) + }, + ), + ); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(overview) + .into(); + + Tab::new(tab_name, content) +} + +fn render_stream_table(data_services_info: &HashMap>) -> Table { + data_services_info.iter().fold( + Table::new() + .with_attributes([("style", "width: 100%; border: 5px solid black;")]) + .with_thead_attributes([("style", "background-color: lightcoral; color: white;")]) + .with_custom_header_row( + TableRow::new() + .with_cell(TableCell::new(TableCellType::Header).with_raw("Stream Id")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("Timestamp")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("Current Version")) + .with_cell(TableCell::new(TableCellType::Header).with_raw("End Version")) + .with_cell( + TableCell::new(TableCellType::Header).with_raw("Data Service Instance"), + ), + ), + |mut table, data_service_info| { + if let Some(last_sample) = data_service_info.1.back() { + let timestamp = format!("{:?}", last_sample.timestamp.unwrap()); + if let Some(stream_info) = last_sample.stream_info.as_ref() { + stream_info.active_streams.iter().for_each(|stream| { + table.add_custom_body_row( + TableRow::new() + .with_cell( + TableCell::new(TableCellType::Data).with_raw(stream.id()), + ) + .with_cell(TableCell::new(TableCellType::Data).with_raw(×tamp)) + .with_cell( + TableCell::new(TableCellType::Data) + .with_raw(stream.current_version()), + ) + .with_cell( + TableCell::new(TableCellType::Data) + .with_raw(stream.end_version()), + ) + .with_cell( + TableCell::new(TableCellType::Data) + .with_raw(data_service_info.0), + ), + ) + }); + } + } + table + }, + ) +} + +fn render_stream_tab( + live_data_services_info: &HashMap>, + historical_data_services_info: &HashMap>, +) -> Tab { + let overview = Container::new(ContainerType::Section) + .with_paragraph_attr( + format!("Connected Streams"), + [("style", "font-size: 24px; font-weight: bold;")], + ) + .with_paragraph_attr( + format!("LiveDataService Streams"), + [("style", "font-size: 18px; font-weight: bold;")], + ) + .with_table(render_stream_table(live_data_services_info)) + .with_paragraph_attr( + format!("HistoricalDataService Streams"), + [("style", "font-size: 18px; font-weight: bold;")], + ) + .with_table(render_stream_table(historical_data_services_info)); + let content = HtmlElement::new(HtmlTag::Div) + .with_container(overview) + .into(); + + Tab::new("Streams", content) +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs index 3e2d0671339bc..c57ab48cb94c9 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/main.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::Result; -use aptos_indexer_grpc_file_store::IndexerGrpcFileStoreWorkerConfig; +use aptos_indexer_grpc_file_store::IndexerGrpcManagerConfig; use aptos_indexer_grpc_server_framework::ServerArgs; use clap::Parser; @@ -13,7 +13,7 @@ static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; #[tokio::main] async fn main() -> Result<()> { let args = ServerArgs::parse(); - args.run::() + args.run::() .await .expect("Failed to run server"); Ok(()) diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/metadata_manager.rs 
b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/metadata_manager.rs new file mode 100644 index 0000000000000..fac02d1d15b5e --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/metadata_manager.rs @@ -0,0 +1,393 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::{bail, Result}; +use aptos_indexer_grpc_utils::timestamp_now_proto; +use aptos_protos::{ + indexer::v1::{ + data_service_client::DataServiceClient, grpc_manager_client::GrpcManagerClient, + service_info::ServiceType, DataServiceInfo, FullnodeInfo, GrpcManagerInfo, + HeartbeatRequest, PingDataServiceRequest, ServiceInfo, + }, + internal::fullnode::v1::{fullnode_data_client::FullnodeDataClient, PingFullnodeRequest}, + util::timestamp::Timestamp, +}; +use dashmap::DashMap; +use rand::prelude::*; +use std::{ + collections::{HashMap, VecDeque}, + sync::atomic::{AtomicU64, Ordering}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use tonic::transport::channel::Channel; +use tracing::{info, trace}; + +const MAX_NUM_OF_STATES_TO_KEEP: usize = 100; + +struct Peer { + client: GrpcManagerClient<Channel>, + recent_states: VecDeque<GrpcManagerInfo>, +} + +impl Peer { + fn new(address: String) -> Self { + let channel = Channel::from_shared(address) + .expect("Bad address.") + .connect_lazy(); + let client = GrpcManagerClient::new(channel); + Self { + client, + recent_states: VecDeque::new(), + } + } +} + +struct Fullnode { + client: FullnodeDataClient<Channel>, + recent_states: VecDeque<FullnodeInfo>, +} + +impl Fullnode { + fn new(address: String) -> Self { + let channel = Channel::from_shared(address) + .expect("Bad address.") + .connect_lazy(); + let client = FullnodeDataClient::new(channel); + Self { + client, + recent_states: VecDeque::new(), + } + } +} + +struct LiveDataService { + client: DataServiceClient<Channel>, + recent_states: VecDeque<DataServiceInfo>, +} + +impl LiveDataService { + fn new(address: String) -> Self { + let channel = Channel::from_shared(address) + .expect("Bad address.") + .connect_lazy(); + let client = DataServiceClient::new(channel); + Self { + client, + recent_states: VecDeque::new(), + } + } +} + +struct HistoricalDataService { + client: DataServiceClient<Channel>, + recent_states: VecDeque<DataServiceInfo>, +} + +impl HistoricalDataService { + fn new(address: String) -> Self { + let channel = Channel::from_shared(address) + .expect("Bad address.") + .connect_lazy(); + let client = DataServiceClient::new(channel); + Self { + client, + recent_states: VecDeque::new(), + } + } +} + +pub(crate) struct MetadataManager { + self_advertised_address: String, + grpc_managers: DashMap<String, Peer>, + fullnodes: DashMap<String, Fullnode>, + live_data_services: DashMap<String, LiveDataService>, + historical_data_services: DashMap<String, HistoricalDataService>, + known_latest_version: AtomicU64, +} + +impl MetadataManager { + pub(crate) fn new( + self_advertised_address: String, + grpc_manager_addresses: Vec<String>, + fullnode_addresses: Vec<String>, + ) -> Self { + let grpc_managers = DashMap::new(); + for address in grpc_manager_addresses { + grpc_managers.insert(address.clone(), Peer::new(address)); + } + let fullnodes = DashMap::new(); + for address in fullnode_addresses { + fullnodes.insert(address.clone(), Fullnode::new(address)); + } + Self { + self_advertised_address, + grpc_managers, + fullnodes, + live_data_services: DashMap::new(), + historical_data_services: DashMap::new(), + known_latest_version: AtomicU64::new(0), + } + } + + fn need_ping(latest_state_timestamp: Timestamp, threshold: Duration) -> bool { + let latest_state_timestamp_since_epoch = Duration::new( + latest_state_timestamp.seconds as u64, + latest_state_timestamp.nanos as u32, + );
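+ // Staleness is computed with saturating subtraction, so a sample timestamped slightly in the future (clock skew between hosts) cannot underflow.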
+ let now_since_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + let staleness = now_since_epoch.saturating_sub(latest_state_timestamp_since_epoch); + + staleness >= threshold + } + + pub(crate) async fn start(&self) -> Result<()> { + loop { + tokio_scoped::scope(|s| { + for kv in &self.grpc_managers { + let grpc_manager = kv.value(); + let client = grpc_manager.client.clone(); + s.spawn(async move { + let _ = self.heartbeat(client).await; + }); + } + + for kv in &self.fullnodes { + let (address, fullnode) = kv.pair(); + let need_ping = fullnode.recent_states.back().map_or(true, |s| { + Self::need_ping(s.timestamp.unwrap_or_default(), Duration::from_secs(5)) + }); + if need_ping { + let address = address.clone(); + let client = fullnode.client.clone(); + s.spawn(async move { + let _ = self.ping_fullnode(address, client).await; + }); + } + } + + for kv in &self.live_data_services { + let (address, live_data_service) = kv.pair(); + let need_ping = live_data_service.recent_states.back().map_or(true, |s| { + Self::need_ping(s.timestamp.unwrap_or_default(), Duration::from_secs(5)) + }); + if need_ping { + let address = address.clone(); + let client = live_data_service.client.clone(); + s.spawn(async move { + let _ = self.ping_live_data_service(address, client).await; + }); + } + } + + for kv in &self.historical_data_services { + let (address, historical_data_service) = kv.pair(); + let need_ping = + historical_data_service + .recent_states + .back() + .map_or(true, |s| { + Self::need_ping( + s.timestamp.unwrap_or_default(), + Duration::from_secs(5), + ) + }); + if need_ping { + let address = address.clone(); + let client = historical_data_service.client.clone(); + s.spawn(async move { + let _ = self.ping_historical_data_service(address, client).await; + }); + } + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + + pub(crate) fn handle_heartbeat( + &self, + address: String, + service_type: ServiceType, + ) -> Result<()> { + match service_type { + ServiceType::LiveDataServiceInfo(info) => { + self.handle_live_data_service_info(address, info) + }, + ServiceType::HistoricalDataServiceInfo(info) => { + self.handle_historical_data_service_info(address, info) + }, + ServiceType::FullnodeInfo(info) => self.handle_fullnode_info(address, info), + ServiceType::GrpcManagerInfo(info) => self.handle_grpc_manager_info(address, info), + } + } + + pub(crate) fn get_fullnode_for_request(&self) -> FullnodeDataClient { + let mut rng = thread_rng(); + // TODO(grao): Filter out bad FNs. 
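+ // For now this picks a registered fullnode uniformly at random; it panics if no fullnode is known.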
+ self.fullnodes + .iter() + .choose(&mut rng) + .map(|kv| kv.value().client.clone()) + .unwrap() + } + + pub(crate) fn get_fullnodes_info(&self) -> HashMap<String, VecDeque<FullnodeInfo>> { + self.fullnodes + .iter() + .map(|entry| (entry.key().clone(), entry.value().recent_states.clone())) + .collect() + } + + pub(crate) fn get_live_data_services_info(&self) -> HashMap<String, VecDeque<DataServiceInfo>> { + self.live_data_services + .iter() + .map(|entry| (entry.key().clone(), entry.value().recent_states.clone())) + .collect() + } + + pub(crate) fn get_historical_data_services_info( + &self, + ) -> HashMap<String, VecDeque<DataServiceInfo>> { + self.historical_data_services + .iter() + .map(|entry| (entry.key().clone(), entry.value().recent_states.clone())) + .collect() + } + + pub(crate) fn get_known_latest_version(&self) -> u64 { + self.known_latest_version.load(Ordering::SeqCst) + } + + fn update_known_latest_version(&self, version: u64) { + self.known_latest_version + .fetch_max(version, Ordering::SeqCst); + } + + async fn heartbeat(&self, mut client: GrpcManagerClient<Channel>) -> Result<()> { + let grpc_manager_info = GrpcManagerInfo { + timestamp: Some(timestamp_now_proto()), + known_latest_version: Some(self.get_known_latest_version()), + master_address: None, + }; + let service_info = ServiceInfo { + address: Some(self.self_advertised_address.clone()), + service_type: Some(ServiceType::GrpcManagerInfo(grpc_manager_info)), + }; + let request = HeartbeatRequest { + service_info: Some(service_info), + }; + let _ = client.heartbeat(request).await?; + + Ok(()) + } + + async fn ping_fullnode( + &self, + address: String, + mut client: FullnodeDataClient<Channel>, + ) -> Result<()> { + trace!("Pinging fullnode {address}."); + let request = PingFullnodeRequest {}; + let response = client.ping(request).await?; + if let Some(info) = response.into_inner().info { + self.handle_fullnode_info(address, info) + } else { + bail!("Bad response.") + } + } + + async fn ping_live_data_service( + &self, + address: String, + mut client: DataServiceClient<Channel>, + ) -> Result<()> { + let request = PingDataServiceRequest { + known_latest_version: Some(self.get_known_latest_version()), + }; + let response = client.ping(request).await?; + if let Some(info) = response.into_inner().info { + self.handle_live_data_service_info(address, info) + } else { + bail!("Bad response.") + } + } + + async fn ping_historical_data_service( + &self, + address: String, + mut client: DataServiceClient<Channel>, + ) -> Result<()> { + let request = PingDataServiceRequest { + known_latest_version: Some(self.get_known_latest_version()), + }; + let response = client.ping(request).await?; + if let Some(info) = response.into_inner().info { + self.handle_historical_data_service_info(address, info) + } else { + bail!("Bad response.") + } + } + + fn handle_live_data_service_info(&self, address: String, info: DataServiceInfo) -> Result<()> { + let mut entry = self + .live_data_services + .entry(address.clone()) + .or_insert(LiveDataService::new(address)); + entry.value_mut().recent_states.push_back(info); + if entry.value().recent_states.len() > MAX_NUM_OF_STATES_TO_KEEP { + entry.value_mut().recent_states.pop_front(); + } + + Ok(()) + } + + fn handle_historical_data_service_info( + &self, + address: String, + info: DataServiceInfo, + ) -> Result<()> { + let mut entry = self + .historical_data_services + .entry(address.clone()) + .or_insert(HistoricalDataService::new(address)); + entry.value_mut().recent_states.push_back(info); + if entry.value().recent_states.len() > MAX_NUM_OF_STATES_TO_KEEP { + entry.value_mut().recent_states.pop_front(); + } + + Ok(()) + } + + 
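// Records the latest state sample reported by a fullnode, advances the globally known latest version, and trims the history to MAX_NUM_OF_STATES_TO_KEEP samples. +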
fn handle_fullnode_info(&self, address: String, info: FullnodeInfo) -> Result<()> { + let mut entry = self + .fullnodes + .entry(address.clone()) + .or_insert(Fullnode::new(address.clone())); + entry.value_mut().recent_states.push_back(info); + if let Some(known_latest_version) = info.known_latest_version { + trace!( + "Received known_latest_version ({known_latest_version}) from fullnode {address}." + ); + self.update_known_latest_version(known_latest_version); + } + if entry.value().recent_states.len() > MAX_NUM_OF_STATES_TO_KEEP { + entry.value_mut().recent_states.pop_front(); + } + + Ok(()) + } + + fn handle_grpc_manager_info(&self, address: String, info: GrpcManagerInfo) -> Result<()> { + let mut entry = self + .grpc_managers + .entry(address.clone()) + .or_insert(Peer::new(address)); + entry.value_mut().recent_states.push_back(info); + if entry.value().recent_states.len() > MAX_NUM_OF_STATES_TO_KEEP { + entry.value_mut().recent_states.pop_front(); + } + + Ok(()) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs deleted file mode 100644 index e9e18d0fb6440..0000000000000 --- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::metrics::{METADATA_UPLOAD_FAILURE_COUNT, PROCESSED_VERSIONS_COUNT}; -use anyhow::{ensure, Context, Result}; -use aptos_indexer_grpc_utils::{ - cache_operator::CacheOperator, - compression_util::{FileStoreMetadata, StorageFormat, FILE_ENTRY_TRANSACTION_COUNT}, - config::IndexerGrpcFileStoreConfig, - counters::{log_grpc_step, IndexerGrpcStep}, - file_store_operator::FileStoreOperator, - types::RedisUrl, -}; -use aptos_moving_average::MovingAverage; -use std::time::Duration; -use tracing::debug; - -// If the version is ahead of the cache head, retry after a short sleep. -const AHEAD_OF_CACHE_SLEEP_DURATION_IN_MILLIS: u64 = 100; -const SERVICE_TYPE: &str = "file_worker"; -const MAX_CONCURRENT_BATCHES: usize = 50; - -/// Processor tails the data in cache and stores the data in file store. -pub struct Processor { - cache_operator: CacheOperator<redis::aio::ConnectionManager>, - file_store_operator: Box<dyn FileStoreOperator>, - chain_id: u64, -} - -impl Processor { - pub async fn new( - redis_main_instance_address: RedisUrl, - file_store_config: IndexerGrpcFileStoreConfig, - chain_id: u64, - enable_cache_compression: bool, - ) -> Result<Self> { - let cache_storage_format = if enable_cache_compression { - StorageFormat::Lz4CompressedProto - } else { - StorageFormat::Base64UncompressedProto - }; - - // Connection to redis is a hard dependency for file store processor. - let conn = redis::Client::open(redis_main_instance_address.0.clone()) - .with_context(|| { - format!( - "Create redis client for {} failed", - redis_main_instance_address.0 - ) - })? - .get_tokio_connection_manager() - .await - .with_context(|| { - format!( - "Create redis connection to {} failed.", - redis_main_instance_address.0 - ) - })?; - let mut cache_operator = CacheOperator::new(conn, cache_storage_format); - - let mut file_store_operator: Box<dyn FileStoreOperator> = file_store_config.create(); - file_store_operator.verify_storage_bucket_existence().await; - let file_store_metadata: Option<FileStoreMetadata> = - file_store_operator.get_file_store_metadata().await; - if file_store_metadata.is_none() { - // If metadata doesn't exist, create and upload it and init file store latest version in cache. 
- while file_store_operator - .update_file_store_metadata_with_timeout(chain_id, 0) - .await - .is_err() - { - tracing::error!( - batch_start_version = 0, - service_type = SERVICE_TYPE, - "[File worker] Failed to update file store metadata. Retrying." - ); - std::thread::sleep(std::time::Duration::from_millis(500)); - METADATA_UPLOAD_FAILURE_COUNT.inc(); - } - } - // Metadata is guaranteed to exist now - let metadata = file_store_operator.get_file_store_metadata().await.unwrap(); - - ensure!(metadata.chain_id == chain_id, "Chain ID mismatch."); - let batch_start_version = metadata.version; - // Cache config in the cache - cache_operator.cache_setup_if_needed().await?; - match cache_operator.get_chain_id().await? { - Some(id) => { - ensure!(id == chain_id, "Chain ID mismatch."); - }, - None => { - cache_operator.set_chain_id(chain_id).await?; - }, - } - cache_operator - .update_file_store_latest_version(batch_start_version) - .await?; - Ok(Self { - cache_operator, - file_store_operator, - chain_id, - }) - } - - /// Starts the processing. The steps are - /// 1. Check chain id at the beginning and every step after - /// 2. Get the batch start version from file store metadata - /// 3. Start loop - /// 3.1 Check head from cache, decide whether we need to parallel process or just wait - /// 3.2 If we're ready to process, create max of 10 threads and fetch / upload data - /// 3.3 Update file store metadata at the end of a batch - pub async fn run(&mut self) -> Result<()> { - let chain_id = self.chain_id; - - let metadata = self - .file_store_operator - .get_file_store_metadata() - .await - .unwrap(); - ensure!(metadata.chain_id == chain_id, "Chain ID mismatch."); - - let mut batch_start_version = metadata.version; - - let mut tps_calculator = MovingAverage::new(10_000); - loop { - let latest_loop_time = std::time::Instant::now(); - let cache_worker_latest = self.cache_operator.get_latest_version().await?.unwrap(); - - // batches tracks the start version of the batches to fetch. 
1000 at the time - let mut batches = vec![]; - let mut start_version = batch_start_version; - while start_version + (FILE_ENTRY_TRANSACTION_COUNT) < cache_worker_latest { - batches.push(start_version); - start_version += FILE_ENTRY_TRANSACTION_COUNT; - if batches.len() >= MAX_CONCURRENT_BATCHES { - break; - } - } - // we're too close to the head - if batches.is_empty() { - debug!( - batch_start_version = batch_start_version, - cache_worker_latest = cache_worker_latest, - "[Filestore] No enough version yet, need 1000 versions at least" - ); - tokio::time::sleep(Duration::from_millis( - AHEAD_OF_CACHE_SLEEP_DURATION_IN_MILLIS, - )) - .await; - continue; - } - - // Create thread and fetch transactions - let mut tasks = vec![]; - - for start_version in batches { - let mut cache_operator_clone = self.cache_operator.clone(); - let mut file_store_operator_clone = self.file_store_operator.clone_box(); - let task = tokio::spawn(async move { - let fetch_start_time = std::time::Instant::now(); - let transactions = cache_operator_clone - .get_transactions(start_version, FILE_ENTRY_TRANSACTION_COUNT) - .await - .unwrap(); - let last_transaction = transactions.last().unwrap().clone(); - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::FilestoreFetchTxns, - Some(start_version as i64), - Some((start_version + FILE_ENTRY_TRANSACTION_COUNT - 1) as i64), - None, - None, - Some(fetch_start_time.elapsed().as_secs_f64()), - None, - Some(FILE_ENTRY_TRANSACTION_COUNT as i64), - None, - ); - for (i, txn) in transactions.iter().enumerate() { - assert_eq!(txn.version, start_version + i as u64); - } - let upload_start_time = std::time::Instant::now(); - let (start, end) = file_store_operator_clone - .upload_transaction_batch(chain_id, transactions) - .await - .unwrap(); - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::FilestoreUploadTxns, - Some(start_version as i64), - Some((start_version + FILE_ENTRY_TRANSACTION_COUNT - 1) as i64), - None, - None, - Some(upload_start_time.elapsed().as_secs_f64()), - None, - Some(FILE_ENTRY_TRANSACTION_COUNT as i64), - None, - ); - - (start, end, last_transaction) - }); - tasks.push(task); - } - let (first_version, last_version, first_version_encoded, last_version_encoded) = - match futures::future::try_join_all(tasks).await { - Ok(mut res) => { - // Check for gaps - res.sort_by(|a, b| a.0.cmp(&b.0)); - let mut prev_start = None; - let mut prev_end = None; - - let first_version = res.first().unwrap().0; - let last_version = res.last().unwrap().1; - let first_version_encoded = res.first().unwrap().2.clone(); - let last_version_encoded = res.last().unwrap().2.clone(); - let versions: Vec = res.iter().map(|x| x.0).collect(); - for result in res { - let start = result.0; - let end = result.1; - if prev_start.is_none() { - prev_start = Some(start); - prev_end = Some(end); - } else { - if prev_end.unwrap() + 1 != start { - tracing::error!( - processed_versions = ?versions, - "[Filestore] Gaps in processing data" - ); - panic!("[Filestore] Gaps in processing data"); - } - prev_start = Some(start); - prev_end = Some(end); - } - } - - ( - first_version, - last_version, - first_version_encoded, - last_version_encoded, - ) - }, - Err(err) => panic!("Error processing transaction batches: {:?}", err), - }; - - // update next batch start version - batch_start_version = last_version + 1; - assert!( - batch_start_version % FILE_ENTRY_TRANSACTION_COUNT == 0, - "[Filestore] Batch must be multiple of 1000" - ); - let size = last_version - first_version + 1; - PROCESSED_VERSIONS_COUNT.inc_by(size); 
- tps_calculator.tick_now(size); - - // Update filestore metadata. First do it in cache for performance then update metadata file - let start_metadata_upload_time = std::time::Instant::now(); - self.cache_operator - .update_file_store_latest_version(batch_start_version) - .await?; - while self - .file_store_operator - .update_file_store_metadata_with_timeout(chain_id, batch_start_version) - .await - .is_err() - { - tracing::error!( - batch_start_version = batch_start_version, - "Failed to update file store metadata. Retrying." - ); - std::thread::sleep(std::time::Duration::from_millis(500)); - METADATA_UPLOAD_FAILURE_COUNT.inc(); - } - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::FilestoreUpdateMetadata, - Some(first_version as i64), - Some(last_version as i64), - None, - None, - Some(start_metadata_upload_time.elapsed().as_secs_f64()), - None, - Some(size as i64), - None, - ); - - let start_version_timestamp = first_version_encoded.timestamp; - let end_version_timestamp = last_version_encoded.timestamp; - let full_loop_duration = latest_loop_time.elapsed().as_secs_f64(); - log_grpc_step( - SERVICE_TYPE, - IndexerGrpcStep::FilestoreProcessedBatch, - Some(first_version as i64), - Some(last_version as i64), - start_version_timestamp.as_ref(), - end_version_timestamp.as_ref(), - Some(full_loop_duration), - None, - Some(size as i64), - None, - ); - } - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/service.rs new file mode 100644 index 0000000000000..6072906233f81 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/service.rs @@ -0,0 +1,84 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{data_manager::DataManager, metadata_manager::MetadataManager}; +use aptos_protos::indexer::v1::{ + grpc_manager_server::GrpcManager, service_info::ServiceType, GetTransactionsRequest, + HeartbeatRequest, HeartbeatResponse, TransactionsResponse, +}; +use std::sync::Arc; +use tonic::{Request, Response, Status}; + +const MAX_BATCH_SIZE: usize = 5 * (1 << 20); + +pub struct GrpcManagerService { + chain_id: u64, + metadata_manager: Arc<MetadataManager>, + data_manager: Arc<DataManager>, +} + +impl GrpcManagerService { + pub(crate) fn new( + chain_id: u64, + metadata_manager: Arc<MetadataManager>, + data_manager: Arc<DataManager>, + ) -> Self { + Self { + chain_id, + metadata_manager, + data_manager, + } + } + + async fn handle_heartbeat( + &self, + address: String, + service_type: ServiceType, + ) -> anyhow::Result<Response<HeartbeatResponse>> { + self.metadata_manager + .handle_heartbeat(address, service_type)?; + + Ok(Response::new(HeartbeatResponse { + known_latest_version: Some(self.metadata_manager.get_known_latest_version()), + })) + } +} + +#[tonic::async_trait] +impl GrpcManager for GrpcManagerService { + async fn heartbeat( + &self, + request: Request<HeartbeatRequest>, + ) -> Result<Response<HeartbeatResponse>, Status> { + let request = request.into_inner(); + if let Some(service_info) = request.service_info { + if let Some(address) = service_info.address { + if let Some(service_type) = service_info.service_type { + return self + .handle_heartbeat(address, service_type) + .await + .map_err(|e| Status::internal(&format!("Error handling heartbeat: {e}"))); + } + } + } + + Err(Status::invalid_argument("Bad request.")) + } + + async fn get_transactions( + &self, + request: Request<GetTransactionsRequest>, + ) -> Result<Response<TransactionsResponse>, Status> { + let request = request.into_inner(); + let transactions = self + .data_manager + .get_transactions(request.starting_version(), MAX_BATCH_SIZE) + .await + .map_err(|e| 
Status::internal(format!("{e}")))?; + + Ok(Response::new(TransactionsResponse { + transactions, + chain_id: Some(self.chain_id), + })) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs index 4b6d290e35cef..388e76aa3b534 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs @@ -2,15 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{counters::CHANNEL_SIZE, stream_coordinator::IndexerStreamCoordinator, ServiceContext}; -use aptos_indexer_grpc_utils::counters::{log_grpc_step_fullnode, IndexerGrpcStep}; +use aptos_indexer_grpc_utils::{ + counters::{log_grpc_step_fullnode, IndexerGrpcStep}, + timestamp_now_proto, +}; use aptos_logger::{error, info}; use aptos_moving_average::MovingAverage; -use aptos_protos::internal::fullnode::v1::{ - fullnode_data_server::FullnodeData, stream_status::StatusType, transactions_from_node_response, - GetTransactionsFromNodeRequest, StreamStatus, TransactionsFromNodeResponse, +use aptos_protos::{ + indexer::v1::FullnodeInfo, + internal::fullnode::v1::{ + fullnode_data_server::FullnodeData, stream_status::StatusType, + transactions_from_node_response, GetTransactionsFromNodeRequest, PingFullnodeRequest, + PingFullnodeResponse, StreamStatus, TransactionsFromNodeResponse, + }, + util::timestamp::Timestamp, }; use futures::Stream; -use std::pin::Pin; +use std::{ + pin::Pin, + time::{SystemTime, UNIX_EPOCH}, +}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status}; @@ -156,6 +167,25 @@ impl FullnodeData for FullnodeDataService { Box::pin(output_stream) as Self::GetTransactionsFromNodeStream )) } + + async fn ping( + &self, + _request: Request<PingFullnodeRequest>, + ) -> Result<Response<PingFullnodeResponse>, Status> { + let timestamp = timestamp_now_proto(); + + let info = FullnodeInfo { + timestamp: Some(timestamp), + known_latest_version: self + .service_context + .context + .db + .get_synced_version() + .map_err(|e| Status::internal(format!("{e}")))?, + }; + let response = PingFullnodeResponse { info: Some(info) }; + Ok(Response::new(response)) + } } pub fn get_status( diff --git a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs index ce4a68249e82a..6b0039e84a743 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs @@ -13,7 +13,11 @@ use std::convert::Infallible; use std::{fs::File, io::Read, panic::PanicInfo, path::PathBuf, process}; use tracing::error; use tracing_subscriber::EnvFilter; -use warp::{http::Response, Filter}; +use warp::{ + http::{Response, StatusCode}, + reply::Reply, + Filter, +}; /// ServerArgs bootstraps a server with all common pieces. And then triggers the run method for /// the specific service. @@ -45,8 +49,9 @@ where { let health_port = config.health_check_port; // Start liveness and readiness probes. 
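+ // The probe task takes ownership of its own clone of the config so the status page handler can render from it.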
+ let config_clone = config.clone(); let task_handler = tokio::spawn(async move { - register_probes_and_metrics_handler(health_port).await; + register_probes_and_metrics_handler(config_clone, health_port).await; anyhow::Ok(()) }); let main_task_handler = @@ -71,7 +76,7 @@ where } } -#[derive(Deserialize, Debug, Serialize)] +#[derive(Deserialize, Clone, Debug, Serialize)] pub struct GenericConfig<T> { // Shared configuration among all services. pub health_check_port: u16, @@ -96,11 +101,15 @@ where fn get_server_name(&self) -> String { self.server_config.get_server_name() } + + async fn status_page(&self) -> Result<warp::reply::Response, warp::Rejection> { + self.server_config.status_page().await + } } /// RunnableConfig is a trait that all services must implement for their configuration. #[async_trait::async_trait] -pub trait RunnableConfig: DeserializeOwned + Send + Sync + 'static { +pub trait RunnableConfig: Clone + DeserializeOwned + Send + Sync + 'static { // Validate the config. fn validate(&self) -> Result<()> { Ok(()) @@ -111,6 +120,10 @@ pub trait RunnableConfig: DeserializeOwned + Send + Sync + 'static { // Get the server name. fn get_server_name(&self) -> String; + + async fn status_page(&self) -> Result<warp::reply::Response, warp::Rejection> { + Ok("Status page is not found.".into_response()) + } } /// Parse a yaml file into a struct. @@ -181,7 +194,10 @@ pub fn setup_logging(make_writer: Option Box } /// Register readiness and liveness probes and set up metrics endpoint. -async fn register_probes_and_metrics_handler(port: u16) { +async fn register_probes_and_metrics_handler<C>(config: GenericConfig<C>, port: u16) +where + C: RunnableConfig, +{ let readiness = warp::path("readiness") .map(move || warp::reply::with_status("ready", warp::http::StatusCode::OK)); @@ -201,6 +217,11 @@ async fn register_probes_and_metrics_handler(port: u16) { .body(encode_buffer) }); + let status_endpoint = warp::path::end().and_then(move || { + let config = config.clone(); + async move { config.status_page().await } + }); + if cfg!(target_os = "linux") { #[cfg(target_os = "linux")] let profilez = warp::path("profilez").and_then(|| async move { @@ -228,11 +249,16 @@ async fn register_probes_and_metrics_handler(port: u16) { }) }); #[cfg(target_os = "linux")] - warp::serve(readiness.or(metrics_endpoint).or(profilez)) - .run(([0, 0, 0, 0], port)) - .await; + warp::serve( + readiness + .or(metrics_endpoint) + .or(status_endpoint) + .or(profilez), + ) + .run(([0, 0, 0, 0], port)) + .await; } else { - warp::serve(readiness.or(metrics_endpoint)) + warp::serve(readiness.or(metrics_endpoint).or(status_endpoint)) .run(([0, 0, 0, 0], port)) .await; } diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml index 2c0516ddda804..61374b3629270 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml @@ -19,6 +19,7 @@ aptos-protos = { workspace = true } async-trait = { workspace = true } backoff = { workspace = true } base64 = { workspace = true } +build_html = { workspace = true } chrono = { workspace = true } cloud-storage = { workspace = true } dashmap = { workspace = true } @@ -34,7 +35,9 @@ ripemd = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } +tokio-stream = { workspace = true } tokio-util = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } url = { workspace = true } +warp = { workspace = true } diff --git 
a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs index 7aad300d5730e..d2e0e85c3ae95 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs @@ -383,14 +383,18 @@ impl CacheOperator { let start_time = std::time::Instant::now(); let mut transactions = vec![]; for encoded_transaction in encoded_transactions { + if encoded_transaction.is_empty() { + break; + } let cache_entry: CacheEntry = CacheEntry::new(encoded_transaction, self.storage_format); let transaction = cache_entry.into_transaction(); transactions.push(transaction); } + /* ensure!( transactions.len() == transaction_count as usize, "Failed to get all transactions from cache." - ); + );*/ let decoding_duration = start_time.elapsed().as_secs_f64(); Ok((transactions, io_duration, decoding_duration)) } diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs index 07f528e6df124..826cbfb6f54a5 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/compression_util.rs @@ -65,6 +65,7 @@ impl FileStoreMetadata { } } +#[derive(Debug)] pub enum CacheEntry { Lz4CompressionProto(Vec<u8>), // Only used for legacy cache entry. @@ -147,7 +148,9 @@ impl CacheEntry { decompressor .read_to_end(&mut decompressed) .expect("Lz4 decompression failed."); - Transaction::decode(decompressed.as_slice()).expect("proto deserialization failed.") + let res = Transaction::decode(decompressed.as_slice()) + .expect("proto deserialization failed."); + res }, CacheEntry::Base64UncompressedProto(bytes) => { let bytes: Vec<u8> = base64::decode(bytes).expect("base64 decoding failed."); @@ -197,13 +200,14 @@ impl FileEntry { .first() .expect("Cannot build empty file") .version; + /* let transactions_count = transactions.len(); if transactions_count % FILE_ENTRY_TRANSACTION_COUNT as usize != 0 { panic!("The number of transactions to upload has to be a multiple of FILE_ENTRY_TRANSACTION_COUNT.") } if starting_version % FILE_ENTRY_TRANSACTION_COUNT != 0 { panic!("Starting version has to be a multiple of FILE_ENTRY_TRANSACTION_COUNT.") - } + }*/ match storage_format { StorageFormat::Lz4CompressedProto => { let t = TransactionsInStorage { diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/config.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/config.rs index 56e32a33ab591..c5929afd881cb 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/config.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/config.rs @@ -43,6 +43,26 @@ impl Default for IndexerGrpcFileStoreConfig { } impl IndexerGrpcFileStoreConfig { + pub async fn create_filestore(self) -> Box<dyn crate::file_store_operator_v2::FileStore> { + match self { + IndexerGrpcFileStoreConfig::GcsFileStore(gcs_file_store) => Box::new( + crate::file_store_operator_v2::gcs::GcsFileStore::new( + gcs_file_store.gcs_file_store_bucket_name, + gcs_file_store.gcs_file_store_bucket_sub_dir, + gcs_file_store + .gcs_file_store_service_account_key_path + .clone(), + ) + .await, + ), + IndexerGrpcFileStoreConfig::LocalFileStore(local_file_store) => { + Box::new(crate::file_store_operator_v2::local::LocalFileStore::new( + local_file_store.local_file_store_path, + )) + }, + } + } + pub fn create(&self) -> Box<dyn FileStoreOperator> { match self { IndexerGrpcFileStoreConfig::GcsFileStore(gcs_file_store) => { diff --git 
a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/gcs.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/gcs.rs new file mode 100644 index 0000000000000..c1490179e02b7 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/gcs.rs @@ -0,0 +1,89 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::file_store_operator_v2::FileStore; +use anyhow::{bail, Result}; +use cloud_storage::{Bucket, Object}; +use std::{env, path::PathBuf}; +use tracing::info; + +const JSON_FILE_TYPE: &str = "application/json"; +// The environment variable to set the service account path. +const SERVICE_ACCOUNT_ENV_VAR: &str = "SERVICE_ACCOUNT"; + +pub struct GcsFileStore { + bucket_name: String, + bucket_sub_dir: Option<PathBuf>, +} + +impl GcsFileStore { + pub async fn new( + bucket_name: String, + bucket_sub_dir: Option<PathBuf>, + service_account_path: String, + ) -> Self { + env::set_var(SERVICE_ACCOUNT_ENV_VAR, service_account_path); + + info!( + bucket_name = bucket_name, + "Verifying the bucket exists for GcsFileStore." + ); + + Bucket::read(&bucket_name) + .await + .expect("Failed to read bucket."); + + Self { + bucket_name, + bucket_sub_dir, + } + } + + fn get_path(&self, file_path: PathBuf) -> String { + if let Some(sub_dir) = &self.bucket_sub_dir { + let mut path = sub_dir.clone(); + path.push(file_path); + path.to_string_lossy().into_owned() + } else { + file_path.to_string_lossy().into_owned() + } + } +} + +#[async_trait::async_trait] +impl FileStore for GcsFileStore { + fn tag(&self) -> &str { + "GCS" + } + + async fn get_raw_file(&self, file_path: PathBuf) -> Result<Option<Vec<u8>>> { + let path = self.get_path(file_path); + match Object::download(&self.bucket_name, path.as_str()).await { + Ok(file) => Ok(Some(file)), + Err(cloud_storage::Error::Other(err)) => { + if err.contains("No such object: ") { + Ok(None) + } else { + bail!("[Indexer File] Error happened while downloading file at {path:?}. {err}",); + } + }, + Err(err) => { + bail!("[Indexer File] Error happened while downloading file at {path:?}. {err}"); + }, + } + } + + async fn save_raw_file(&self, file_path: PathBuf, data: Vec<u8>) -> Result<()> { + let path = self.get_path(file_path); + Object::create( + self.bucket_name.as_str(), + data, + path.as_str(), + JSON_FILE_TYPE, + ) + .await + .map_err(anyhow::Error::msg)?; + + Ok(()) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/local.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/local.rs new file mode 100644 index 0000000000000..c8e26a3defee4 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/local.rs @@ -0,0 +1,56 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::file_store_operator_v2::FileStore; +use anyhow::{bail, Result}; +use std::path::PathBuf; +use tracing::info; + +#[derive(Clone)] +pub struct LocalFileStore { + path: PathBuf, +} + +impl LocalFileStore { + pub fn new(path: PathBuf) -> Self { + info!( + path = path.to_str().unwrap(), + "Verifying the path exists for LocalFileStore." 
+ ); + if !path.exists() { + panic!("LocalFileStore path does not exist."); + } + Self { path } + } +} + +#[async_trait::async_trait] +impl FileStore for LocalFileStore { + fn tag(&self) -> &str { + "LOCAL" + } + + async fn get_raw_file(&self, file_path: PathBuf) -> Result<Option<Vec<u8>>> { + let file_path = self.path.join(file_path); + match tokio::fs::read(&file_path).await { + Ok(file) => Ok(Some(file)), + Err(err) => { + if err.kind() == std::io::ErrorKind::NotFound { + Ok(None) + } else { + bail!("[Indexer File] Error happened while getting file at {file_path:?}. {err}"); + } + }, + } + } + + async fn save_raw_file(&self, file_path: PathBuf, data: Vec<u8>) -> Result<()> { + let file_path = self.path.join(file_path); + if let Some(parent) = file_path.parent() { + tokio::fs::create_dir_all(parent).await?; + } + tokio::fs::write(file_path, data) + .await + .map_err(anyhow::Error::msg) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/mod.rs new file mode 100644 index 0000000000000..09739eac56e64 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/file_store_operator_v2/mod.rs @@ -0,0 +1,229 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod gcs; +pub mod local; + +use crate::{ + compression_util::{FileEntry, FileStoreMetadata, StorageFormat}, + counters::TRANSACTION_STORE_FETCH_RETRIES, +}; +use anyhow::Result; +use aptos_protos::transaction::v1::Transaction; +use serde::{Deserialize, Serialize}; +use std::{ + ops::Deref, + path::PathBuf, + sync::atomic::{AtomicU64, Ordering}, +}; +use tokio::sync::mpsc::Sender; + +const METADATA_FILE_NAME: &str = "metadata.json"; + +#[derive(Serialize, Deserialize, Default)] +pub struct BatchMetadata { + pub files: Vec<(u64, usize)>, +} + +#[async_trait::async_trait] +pub trait FileStore: Sync + Send { + /// The tag of the store, for logging. + fn tag(&self) -> &str; + + async fn get_raw_file(&self, file_path: PathBuf) -> Result<Option<Vec<u8>>>; + + async fn save_raw_file(&self, file_path: PathBuf, data: Vec<u8>) -> Result<()>; +} + +// TODO(grao): Split out the readonly part. 
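+// Transactions are laid out in folders of `num_transactions_per_folder` consecutive versions; each folder also carries a metadata.json (BatchMetadata) recording the (first_version, size) of every file inside it.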
+pub struct FileStoreOperatorV2 { + chain_id: u64, + file_store: Box<dyn FileStore>, + num_transactions_per_folder: u64, + cached_file_store_version: AtomicU64, +} + +impl Deref for FileStoreOperatorV2 { + type Target = Box<dyn FileStore>; + fn deref(&self) -> &Box<dyn FileStore> { + &self.file_store + } +} + +impl FileStoreOperatorV2 { + pub fn new( + chain_id: u64, + file_store: Box<dyn FileStore>, + num_transactions_per_folder: u64, + ) -> Self { + Self { + chain_id, + file_store, + num_transactions_per_folder, + cached_file_store_version: AtomicU64::new(0), + } + } + + pub async fn maybe_init_metadata(&self) -> Result<()> { + match self.get_file_store_metadata().await { + Some(_) => Ok(()), + None => self.update_file_store_metadata(0).await, + } + } + + pub fn get_path_for_version(&self, version: u64) -> PathBuf { + let mut buf = self.get_folder_name(version); + buf.push(format!("{}", version)); + buf + } + + pub fn get_path_for_batch_metadata(&self, version: u64) -> PathBuf { + let folder = self.get_folder_name(version); + let mut batch_metadata_path = PathBuf::new(); + batch_metadata_path.push(folder); + batch_metadata_path.push(METADATA_FILE_NAME); + batch_metadata_path + } + + pub async fn get_transaction_batch( + &self, + version: u64, + retries: u8, + max_files: Option<usize>, + tx: Sender<Vec<Transaction>>, + ) { + let batch_metadata = self.get_batch_metadata(version).await; + if batch_metadata.is_none() { + return; + } + + let batch_metadata = batch_metadata.unwrap(); + + let mut file_index = None; + for (i, (file_store_version, _)) in batch_metadata.files.iter().enumerate().rev() { + if *file_store_version <= version { + file_index = Some(i); + break; + } + } + + let file_index = file_index.expect("Must find file_index."); + let mut end_file_index = batch_metadata.files.len(); + if let Some(max_files) = max_files { + end_file_index = end_file_index.min(file_index.saturating_add(max_files)); + } + + for i in file_index..end_file_index { + let current_version = batch_metadata.files[i].0; + let transactions = self + .get_transaction_file_at_version(current_version, retries) + .await; + if let Ok(transactions) = transactions { + let num_to_skip = version.saturating_sub(current_version) as usize; + let result = if num_to_skip > 0 { + transactions.into_iter().skip(num_to_skip).collect() + } else { + transactions + }; + if tx.send(result).await.is_err() { + break; + } + } else { + break; + } + } + } + + /// Returns file store metadata, or None if not found. + pub async fn get_file_store_metadata(&self) -> Option<FileStoreMetadata> { + self.file_store + .get_raw_file(PathBuf::from(METADATA_FILE_NAME)) + .await + .expect("Failed to get file store metadata.") + .map(|data| serde_json::from_slice(&data).expect("Metadata JSON is invalid.")) + } + + /// Updates the file store metadata. + pub async fn update_file_store_metadata(&self, version: u64) -> Result<()> { + let metadata = + FileStoreMetadata::new(self.chain_id, version, StorageFormat::Lz4CompressedProto); + + let raw_data = serde_json::to_vec(&metadata).map_err(anyhow::Error::msg)?; + self.file_store + .save_raw_file(PathBuf::from(METADATA_FILE_NAME), raw_data) + .await + } + + /// Returns the latest_version (next_version) that is going to be processed by the file store, + /// or None if the metadata file doesn't exist. 
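+ /// As a side effect, a successful read advances `cached_file_store_version`, so later `can_serve` calls can usually answer without re-fetching the metadata file.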
+ pub async fn get_latest_version(&self) -> Option<u64> { + let metadata = self.get_file_store_metadata().await; + let latest_version = metadata.map(|metadata| { + if metadata.chain_id != self.chain_id { + panic!("Wrong chain_id."); + } + metadata.version + }); + + if let Some(version) = latest_version { + self.cached_file_store_version + .fetch_max(version, Ordering::SeqCst); + } + + latest_version + } + + pub async fn can_serve(&self, version: u64) -> bool { + if self.cached_file_store_version.load(Ordering::SeqCst) > version { + return true; + } + + self.get_latest_version().await.unwrap() > version + } + + fn get_folder_name(&self, version: u64) -> PathBuf { + let mut buf = PathBuf::new(); + buf.push(format!("{}", version / self.num_transactions_per_folder)); + buf + } + + async fn get_transaction_file_at_version( + &self, + version: u64, + retries: u8, + ) -> Result<Vec<Transaction>> { + let mut retries = retries; + let bytes = loop { + let path = self.get_path_for_version(version); + match self.file_store.get_raw_file(path.clone()).await { + Ok(bytes) => break bytes.expect(&format!("File should exist: {path:?}.")), + Err(err) => { + TRANSACTION_STORE_FETCH_RETRIES + .with_label_values(&[self.file_store.tag()]) + .inc_by(1); + + if retries == 0 { + return Err(err); + } + retries -= 1; + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + }, + } + }; + + let transactions_in_storage = tokio::task::spawn_blocking(move || { + FileEntry::new(bytes, StorageFormat::Lz4CompressedProto).into_transactions_in_storage() + }) + .await?; + + Ok(transactions_in_storage.transactions) + } + + async fn get_batch_metadata(&self, version: u64) -> Option<BatchMetadata> { + self.file_store + .get_raw_file(self.get_path_for_batch_metadata(version)) + .await + .expect("Failed to get batch metadata.") + .map(|data| serde_json::from_slice(&data).expect("Batch metadata JSON is invalid.")) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs index d76fdc5108225..54756a12fac19 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs @@ -7,7 +7,9 @@ pub mod config; pub mod constants; pub mod counters; pub mod file_store_operator; +pub mod file_store_operator_v2; pub mod in_memory_cache; +pub mod status_page; pub mod types; use anyhow::{Context, Result}; @@ -17,7 +19,7 @@ use aptos_protos::{ util::timestamp::Timestamp, }; use prost::Message; -use std::time::Duration; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tonic::codec::CompressionEncoding; use url::Url; @@ -95,9 +97,17 @@ pub async fn create_data_service_grpc_client( Ok(client) } +pub fn timestamp_now_proto() -> Timestamp { + let ts = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + Timestamp { + seconds: ts.as_secs() as i64, + nanos: ts.subsec_nanos() as i32, + } +} + pub fn time_diff_since_pb_timestamp_in_secs(timestamp: &Timestamp) -> f64 { - let current_timestamp = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) + let current_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) .expect("SystemTime before UNIX EPOCH!") .as_secs_f64(); let transaction_time = timestamp.seconds as f64 + timestamp.nanos as f64 * 1e-9; diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/mod.rs new file mode 100644 index 0000000000000..2a017f843fec0 --- /dev/null +++ 
b/ecosystem/indexer-grpc/indexer-grpc-utils/src/status_page/mod.rs @@ -0,0 +1,80 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use build_html::{Html, HtmlChild, HtmlContainer, HtmlElement, HtmlPage, HtmlTag}; +use warp::{ + reply::{html, Reply, Response}, + Rejection, +}; + +include!("html.rs"); + +pub struct Tab { + name: String, + content: HtmlChild, +} + +impl Tab { + pub fn new(name: &str, content: HtmlChild) -> Self { + Self { + name: name.to_string(), + content, + } + } +} + +pub fn render_status_page(tabs: Vec<Tab>) -> Result<Response, Rejection> { + let tab_names = tabs.iter().map(|tab| tab.name.clone()).collect::<Vec<_>>(); + let tab_contents = tabs.into_iter().map(|tab| tab.content).collect::<Vec<_>>(); + + let nav_bar = HtmlElement::new(HtmlTag::Div) + .with_attribute("id", "nav-bar") + .with_child( + tab_names + .into_iter() + .enumerate() + .fold( + HtmlElement::new(HtmlTag::UnorderedList), + |ul, (i, tab_name)| { + ul.with_child( + HtmlElement::new(HtmlTag::ListElement) + .with_attribute("onclick", format!("showTab({i})")) + .with_attribute("class", if i == 0 { "tab active" } else { "tab" }) + .with_child(tab_name.into()) + .into(), + ) + }, + ) + .into(), + ); + + let content = tab_contents.into_iter().enumerate().fold( + HtmlElement::new(HtmlTag::Div), + |div, (i, tab_content)| { + div.with_child( + HtmlElement::new(HtmlTag::Div) + .with_attribute("id", format!("tab-{i}")) + .with_attribute( + "style", + if i == 0 { + "display: block;" + } else { + "display: none;" + }, + ) + .with_child(tab_content) + .into(), + ) + }, + ); + + let page = HtmlPage::new() + .with_title("Status") + .with_style(STYLE) + .with_script_literal(SCRIPT) + .with_raw(nav_bar) + .with_raw(content) + .to_html_string(); + + Ok(html(page).into_response()) +} diff --git a/protos/proto/aptos/indexer/v1/grpc.proto b/protos/proto/aptos/indexer/v1/grpc.proto new file mode 100644 index 0000000000000..ea1f3f3313ff8 --- /dev/null +++ b/protos/proto/aptos/indexer/v1/grpc.proto @@ -0,0 +1,73 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package aptos.indexer.v1; + +import "aptos/indexer/v1/raw_data.proto"; +import "aptos/transaction/v1/transaction.proto"; +import "aptos/util/timestamp/timestamp.proto"; + +message ActiveStream { + optional string id = 1; + optional uint64 current_version = 2; + optional uint64 end_version = 3; +} + +message StreamInfo { + repeated ActiveStream active_streams = 1; +} + +message DataServiceInfo { + optional aptos.util.timestamp.Timestamp timestamp = 1; + optional uint64 known_latest_version = 2; + optional StreamInfo stream_info = 3; +} + +message FullnodeInfo { + optional aptos.util.timestamp.Timestamp timestamp = 1; + optional uint64 known_latest_version = 2; +} + +message GrpcManagerInfo { + optional aptos.util.timestamp.Timestamp timestamp = 1; + optional uint64 known_latest_version = 2; + optional string master_address = 3; +} + +message ServiceInfo { + optional string address = 1; + oneof service_type { + DataServiceInfo live_data_service_info = 2; + DataServiceInfo historical_data_service_info = 3; + FullnodeInfo fullnode_info = 4; + GrpcManagerInfo grpc_manager_info = 5; + } +} + +message HeartbeatRequest { + optional ServiceInfo service_info = 1; +} + +message HeartbeatResponse { + optional uint64 known_latest_version = 1; +} + +message PingDataServiceRequest { + optional uint64 known_latest_version = 1; +} + +message PingDataServiceResponse { + optional DataServiceInfo info = 1; +} + +service GrpcManager { + rpc 
Heartbeat(HeartbeatRequest) returns (HeartbeatResponse); + rpc GetTransactions(GetTransactionsRequest) returns (TransactionsResponse); +} + +service DataService { + rpc Ping(PingDataServiceRequest) returns (PingDataServiceResponse); + rpc GetTransactions(GetTransactionsRequest) returns (stream TransactionsResponse); +} diff --git a/protos/proto/aptos/internal/fullnode/v1/fullnode_data.proto b/protos/proto/aptos/internal/fullnode/v1/fullnode_data.proto index b4e656550e16f..27e08c4fcec1a 100644 --- a/protos/proto/aptos/internal/fullnode/v1/fullnode_data.proto +++ b/protos/proto/aptos/internal/fullnode/v1/fullnode_data.proto @@ -6,6 +6,7 @@ syntax = "proto3"; package aptos.internal.fullnode.v1; import "aptos/transaction/v1/transaction.proto"; +import "aptos/indexer/v1/grpc.proto"; // Transaction data is transferred via 1 stream with batches until terminated. // One stream consists: @@ -52,6 +53,14 @@ message TransactionsFromNodeResponse { uint32 chain_id = 3; } +message PingFullnodeRequest { +} + +message PingFullnodeResponse { + optional aptos.indexer.v1.FullnodeInfo info = 1; +} + service FullnodeData { + rpc Ping(PingFullnodeRequest) returns (PingFullnodeResponse); rpc GetTransactionsFromNode(GetTransactionsFromNodeRequest) returns (stream TransactionsFromNodeResponse); } diff --git a/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2.py b/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2.py index 454819ebde23f..fde9eec632548 100644 --- a/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2.py +++ b/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2.py @@ -12,12 +12,13 @@ _sym_db = _symbol_database.Default() +from aptos.indexer.v1 import grpc_pb2 as aptos_dot_indexer_dot_v1_dot_grpc__pb2 from aptos.transaction.v1 import ( transaction_pb2 as aptos_dot_transaction_dot_v1_dot_transaction__pb2, ) DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n.aptos/internal/fullnode/v1/fullnode_data.proto\x12\x1a\x61ptos.internal.fullnode.v1\x1a&aptos/transaction/v1/transaction.proto"M\n\x12TransactionsOutput\x12\x37\n\x0ctransactions\x18\x01 \x03(\x0b\x32!.aptos.transaction.v1.Transaction"\xf2\x01\n\x0cStreamStatus\x12\x41\n\x04type\x18\x01 \x01(\x0e\x32\x33.aptos.internal.fullnode.v1.StreamStatus.StatusType\x12\x15\n\rstart_version\x18\x02 \x01(\x04\x12\x1c\n\x0b\x65nd_version\x18\x03 \x01(\x04\x42\x02\x30\x01H\x00\x88\x01\x01"Z\n\nStatusType\x12\x1b\n\x17STATUS_TYPE_UNSPECIFIED\x10\x00\x12\x14\n\x10STATUS_TYPE_INIT\x10\x01\x12\x19\n\x15STATUS_TYPE_BATCH_END\x10\x02\x42\x0e\n\x0c_end_version"\x94\x01\n\x1eGetTransactionsFromNodeRequest\x12!\n\x10starting_version\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x88\x01\x01\x12#\n\x12transactions_count\x18\x02 \x01(\x04\x42\x02\x30\x01H\x01\x88\x01\x01\x42\x13\n\x11_starting_versionB\x15\n\x13_transactions_count"\xb8\x01\n\x1cTransactionsFromNodeResponse\x12:\n\x06status\x18\x01 \x01(\x0b\x32(.aptos.internal.fullnode.v1.StreamStatusH\x00\x12>\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32..aptos.internal.fullnode.v1.TransactionsOutputH\x00\x12\x10\n\x08\x63hain_id\x18\x03 \x01(\rB\n\n\x08response2\xa2\x01\n\x0c\x46ullnodeData\x12\x91\x01\n\x17GetTransactionsFromNode\x12:.aptos.internal.fullnode.v1.GetTransactionsFromNodeRequest\x1a\x38.aptos.internal.fullnode.v1.TransactionsFromNodeResponse0\x01\x62\x06proto3' + 
b'\n.aptos/internal/fullnode/v1/fullnode_data.proto\x12\x1a\x61ptos.internal.fullnode.v1\x1a&aptos/transaction/v1/transaction.proto\x1a\x1b\x61ptos/indexer/v1/grpc.proto"M\n\x12TransactionsOutput\x12\x37\n\x0ctransactions\x18\x01 \x03(\x0b\x32!.aptos.transaction.v1.Transaction"\xf2\x01\n\x0cStreamStatus\x12\x41\n\x04type\x18\x01 \x01(\x0e\x32\x33.aptos.internal.fullnode.v1.StreamStatus.StatusType\x12\x15\n\rstart_version\x18\x02 \x01(\x04\x12\x1c\n\x0b\x65nd_version\x18\x03 \x01(\x04\x42\x02\x30\x01H\x00\x88\x01\x01"Z\n\nStatusType\x12\x1b\n\x17STATUS_TYPE_UNSPECIFIED\x10\x00\x12\x14\n\x10STATUS_TYPE_INIT\x10\x01\x12\x19\n\x15STATUS_TYPE_BATCH_END\x10\x02\x42\x0e\n\x0c_end_version"\x94\x01\n\x1eGetTransactionsFromNodeRequest\x12!\n\x10starting_version\x18\x01 \x01(\x04\x42\x02\x30\x01H\x00\x88\x01\x01\x12#\n\x12transactions_count\x18\x02 \x01(\x04\x42\x02\x30\x01H\x01\x88\x01\x01\x42\x13\n\x11_starting_versionB\x15\n\x13_transactions_count"\xb8\x01\n\x1cTransactionsFromNodeResponse\x12:\n\x06status\x18\x01 \x01(\x0b\x32(.aptos.internal.fullnode.v1.StreamStatusH\x00\x12>\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32..aptos.internal.fullnode.v1.TransactionsOutputH\x00\x12\x10\n\x08\x63hain_id\x18\x03 \x01(\rB\n\n\x08response"\x15\n\x13PingFullnodeRequest"R\n\x14PingFullnodeResponse\x12\x31\n\x04info\x18\x01 \x01(\x0b\x32\x1e.aptos.indexer.v1.FullnodeInfoH\x00\x88\x01\x01\x42\x07\n\x05_info2\x8d\x02\n\x0c\x46ullnodeData\x12i\n\x04Ping\x12/.aptos.internal.fullnode.v1.PingFullnodeRequest\x1a\x30.aptos.internal.fullnode.v1.PingFullnodeResponse\x12\x91\x01\n\x17GetTransactionsFromNode\x12:.aptos.internal.fullnode.v1.GetTransactionsFromNodeRequest\x1a\x38.aptos.internal.fullnode.v1.TransactionsFromNodeResponse0\x01\x62\x06proto3' ) _globals = globals() @@ -37,16 +38,20 @@ _GETTRANSACTIONSFROMNODEREQUEST.fields_by_name[ "transactions_count" ]._serialized_options = b"0\001" - _globals["_TRANSACTIONSOUTPUT"]._serialized_start = 118 - _globals["_TRANSACTIONSOUTPUT"]._serialized_end = 195 - _globals["_STREAMSTATUS"]._serialized_start = 198 - _globals["_STREAMSTATUS"]._serialized_end = 440 - _globals["_STREAMSTATUS_STATUSTYPE"]._serialized_start = 334 - _globals["_STREAMSTATUS_STATUSTYPE"]._serialized_end = 424 - _globals["_GETTRANSACTIONSFROMNODEREQUEST"]._serialized_start = 443 - _globals["_GETTRANSACTIONSFROMNODEREQUEST"]._serialized_end = 591 - _globals["_TRANSACTIONSFROMNODERESPONSE"]._serialized_start = 594 - _globals["_TRANSACTIONSFROMNODERESPONSE"]._serialized_end = 778 - _globals["_FULLNODEDATA"]._serialized_start = 781 - _globals["_FULLNODEDATA"]._serialized_end = 943 + _globals["_TRANSACTIONSOUTPUT"]._serialized_start = 147 + _globals["_TRANSACTIONSOUTPUT"]._serialized_end = 224 + _globals["_STREAMSTATUS"]._serialized_start = 227 + _globals["_STREAMSTATUS"]._serialized_end = 469 + _globals["_STREAMSTATUS_STATUSTYPE"]._serialized_start = 363 + _globals["_STREAMSTATUS_STATUSTYPE"]._serialized_end = 453 + _globals["_GETTRANSACTIONSFROMNODEREQUEST"]._serialized_start = 472 + _globals["_GETTRANSACTIONSFROMNODEREQUEST"]._serialized_end = 620 + _globals["_TRANSACTIONSFROMNODERESPONSE"]._serialized_start = 623 + _globals["_TRANSACTIONSFROMNODERESPONSE"]._serialized_end = 807 + _globals["_PINGFULLNODEREQUEST"]._serialized_start = 809 + _globals["_PINGFULLNODEREQUEST"]._serialized_end = 830 + _globals["_PINGFULLNODERESPONSE"]._serialized_start = 832 + _globals["_PINGFULLNODERESPONSE"]._serialized_end = 914 + _globals["_FULLNODEDATA"]._serialized_start = 917 + _globals["_FULLNODEDATA"]._serialized_end = 1186 # 
@@protoc_insertion_point(module_scope) diff --git a/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2.pyi b/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2.pyi index cade8b80a51e8..933ae865867ab 100644 --- a/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2.pyi +++ b/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2.pyi @@ -4,6 +4,7 @@ from typing import Mapping as _Mapping from typing import Optional as _Optional from typing import Union as _Union +from aptos.indexer.v1 import grpc_pb2 as _grpc_pb2 from aptos.transaction.v1 import transaction_pb2 as _transaction_pb2 from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -75,3 +76,15 @@ class TransactionsFromNodeResponse(_message.Message): data: _Optional[_Union[TransactionsOutput, _Mapping]] = ..., chain_id: _Optional[int] = ..., ) -> None: ... + +class PingFullnodeRequest(_message.Message): + __slots__ = [] + def __init__(self) -> None: ... + +class PingFullnodeResponse(_message.Message): + __slots__ = ["info"] + INFO_FIELD_NUMBER: _ClassVar[int] + info: _grpc_pb2.FullnodeInfo + def __init__( + self, info: _Optional[_Union[_grpc_pb2.FullnodeInfo, _Mapping]] = ... + ) -> None: ... diff --git a/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2_grpc.py b/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2_grpc.py index bb869dab77dfd..a1926e4f4b7f9 100644 --- a/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2_grpc.py +++ b/protos/python/aptos_protos/aptos/internal/fullnode/v1/fullnode_data_pb2_grpc.py @@ -15,6 +15,11 @@ def __init__(self, channel): Args: channel: A grpc.Channel. """ + self.Ping = channel.unary_unary( + "/aptos.internal.fullnode.v1.FullnodeData/Ping", + request_serializer=aptos_dot_internal_dot_fullnode_dot_v1_dot_fullnode__data__pb2.PingFullnodeRequest.SerializeToString, + response_deserializer=aptos_dot_internal_dot_fullnode_dot_v1_dot_fullnode__data__pb2.PingFullnodeResponse.FromString, + ) self.GetTransactionsFromNode = channel.unary_stream( "/aptos.internal.fullnode.v1.FullnodeData/GetTransactionsFromNode", request_serializer=aptos_dot_internal_dot_fullnode_dot_v1_dot_fullnode__data__pb2.GetTransactionsFromNodeRequest.SerializeToString, @@ -25,6 +30,12 @@ def __init__(self, channel): class FullnodeDataServicer(object): """Missing associated documentation comment in .proto file.""" + def Ping(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + def GetTransactionsFromNode(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) @@ -34,6 +45,11 @@ def GetTransactionsFromNode(self, request, context): def add_FullnodeDataServicer_to_server(servicer, server): rpc_method_handlers = { + "Ping": grpc.unary_unary_rpc_method_handler( + servicer.Ping, + request_deserializer=aptos_dot_internal_dot_fullnode_dot_v1_dot_fullnode__data__pb2.PingFullnodeRequest.FromString, + response_serializer=aptos_dot_internal_dot_fullnode_dot_v1_dot_fullnode__data__pb2.PingFullnodeResponse.SerializeToString, + ), "GetTransactionsFromNode": grpc.unary_stream_rpc_method_handler( servicer.GetTransactionsFromNode, 
             request_deserializer=aptos_dot_internal_dot_fullnode_dot_v1_dot_fullnode__data__pb2.GetTransactionsFromNodeRequest.FromString,
@@ -50,6 +66,35 @@ def add_FullnodeDataServicer_to_server(servicer, server):
 class FullnodeData(object):
     """Missing associated documentation comment in .proto file."""
 
+    @staticmethod
+    def Ping(
+        request,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        insecure=False,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.unary_unary(
+            request,
+            target,
+            "/aptos.internal.fullnode.v1.FullnodeData/Ping",
+            aptos_dot_internal_dot_fullnode_dot_v1_dot_fullnode__data__pb2.PingFullnodeRequest.SerializeToString,
+            aptos_dot_internal_dot_fullnode_dot_v1_dot_fullnode__data__pb2.PingFullnodeResponse.FromString,
+            options,
+            channel_credentials,
+            insecure,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
+
     @staticmethod
     def GetTransactionsFromNode(
         request,
diff --git a/protos/rust/src/pb/aptos.indexer.v1.rs b/protos/rust/src/pb/aptos.indexer.v1.rs
index b10551ffc515a..74c7ad308650b 100644
--- a/protos/rust/src/pb/aptos.indexer.v1.rs
+++ b/protos/rust/src/pb/aptos.indexer.v1.rs
@@ -2,8 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 
 // @generated
+// This file is @generated by prost-build.
 /// This is for storage only.
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct TransactionsInStorage {
     /// Required; transactions data.
@@ -13,8 +13,7 @@ pub struct TransactionsInStorage {
     #[prost(uint64, optional, tag="2")]
     pub starting_version: ::core::option::Option<u64>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct GetTransactionsRequest {
     /// Required; start version of current stream.
     #[prost(uint64, optional, tag="1")]
     pub starting_version: ::core::option::Option<u64>,
@@ -29,7 +28,6 @@ pub struct GetTransactionsRequest {
     pub batch_size: ::core::option::Option<u64>,
 }
 /// TransactionsResponse is a batch of transactions.
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct TransactionsResponse {
     /// Required; transactions data.
@@ -39,6 +37,86 @@ pub struct TransactionsResponse {
     #[prost(uint64, optional, tag="2")]
     pub chain_id: ::core::option::Option<u64>,
 }
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ActiveStream {
+    #[prost(string, optional, tag="1")]
+    pub id: ::core::option::Option<::prost::alloc::string::String>,
+    #[prost(uint64, optional, tag="2")]
+    pub current_version: ::core::option::Option<u64>,
+    #[prost(uint64, optional, tag="3")]
+    pub end_version: ::core::option::Option<u64>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct StreamInfo {
+    #[prost(message, repeated, tag="1")]
+    pub active_streams: ::prost::alloc::vec::Vec<ActiveStream>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DataServiceInfo {
+    #[prost(message, optional, tag="1")]
+    pub timestamp: ::core::option::Option<super::super::util::timestamp::Timestamp>,
+    #[prost(uint64, optional, tag="2")]
+    pub known_latest_version: ::core::option::Option<u64>,
+    #[prost(message, optional, tag="3")]
+    pub stream_info: ::core::option::Option<StreamInfo>,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct FullnodeInfo {
+    #[prost(message, optional, tag="1")]
+    pub timestamp: ::core::option::Option<super::super::util::timestamp::Timestamp>,
+    #[prost(uint64, optional, tag="2")]
+    pub known_latest_version: ::core::option::Option<u64>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GrpcManagerInfo {
+    #[prost(message, optional, tag="1")]
+    pub timestamp: ::core::option::Option<super::super::util::timestamp::Timestamp>,
+    #[prost(uint64, optional, tag="2")]
+    pub known_latest_version: ::core::option::Option<u64>,
+    #[prost(string, optional, tag="3")]
+    pub master_address: ::core::option::Option<::prost::alloc::string::String>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ServiceInfo {
+    #[prost(string, optional, tag="1")]
+    pub address: ::core::option::Option<::prost::alloc::string::String>,
+    #[prost(oneof="service_info::ServiceType", tags="2, 3, 4, 5")]
+    pub service_type: ::core::option::Option<service_info::ServiceType>,
+}
+/// Nested message and enum types in `ServiceInfo`.
+pub mod service_info {
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum ServiceType {
+        #[prost(message, tag="2")]
+        LiveDataServiceInfo(super::DataServiceInfo),
+        #[prost(message, tag="3")]
+        HistoricalDataServiceInfo(super::DataServiceInfo),
+        #[prost(message, tag="4")]
+        FullnodeInfo(super::FullnodeInfo),
+        #[prost(message, tag="5")]
+        GrpcManagerInfo(super::GrpcManagerInfo),
+    }
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct HeartbeatRequest {
+    #[prost(message, optional, tag="1")]
+    pub service_info: ::core::option::Option<ServiceInfo>,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct HeartbeatResponse {
+    #[prost(uint64, optional, tag="1")]
+    pub known_latest_version: ::core::option::Option<u64>,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct PingDataServiceRequest {
+    #[prost(uint64, optional, tag="1")]
+    pub known_latest_version: ::core::option::Option<u64>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PingDataServiceResponse {
+    #[prost(message, optional, tag="1")]
+    pub info: ::core::option::Option<DataServiceInfo>,
+}
 /// Encoded file descriptor set for the `aptos.indexer.v1` package
 pub const FILE_DESCRIPTOR_SET: &[u8] = &[
     0x0a, 0xce, 0x12, 0x0a, 0x1f, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78,
@@ -190,7 +268,292 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[
     0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x29, 0x16, 0x2c, 0x0a, 0x0c, 0x0a,
     0x05, 0x06, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x29, 0x37, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x06,
     0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x29, 0x3e, 0x52, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-    0x33,
+    0x33, 0x0a, 0xd2, 0x23, 0x0a, 0x1b, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65,
+    0x78, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+    0x6f, 0x12, 0x10, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72,
+    0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78,
+    0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x61, 0x77, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70,
+    0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e,
+    0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73,
+    0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x61, 0x70,
+    0x74, 0x6f, 0x73, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+    0x6d, 0x70, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+    0x74, 0x6f, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, 0x72,
+    0x65, 0x61, 0x6d, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48,
+    0x00, 0x52, 0x02, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x75, 0x72, 0x72,
+    0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+    0x04, 0x48, 0x01, 0x52, 0x0e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73,
+    0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x5f, 0x76, 0x65,
+    0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, 0x0a, 0x65,
+    0x6e, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x05, 0x0a, 0x03,
+    0x5f, 0x69, 0x64, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f,
+    0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x65, 0x6e, 0x64, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x53, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x45, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x0d, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x22, 0x87, 0x02, 0x0a, + 0x0f, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, + 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, + 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x48, 0x01, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, + 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x0b, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x02, + 0x52, 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, + 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x17, 0x0a, + 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xb0, 0x01, 0x0a, 0x0c, 0x46, 0x75, 0x6c, 0x6c, 0x6e, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x42, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, + 0x6f, 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x12, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, + 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x0f, 0x47, 0x72, + 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x42, 0x0a, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x48, 0x00, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01, + 0x01, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, + 0x01, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2a, 0x0a, 0x0e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x02, 0x52, 0x0d, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0xa0, + 0x03, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x58, 0x0a, + 0x16, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x48, 0x00, 0x52, 0x13, 0x6c, 0x69, 0x76, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x64, 0x0a, 0x1c, 0x68, 0x69, 0x73, 0x74, 0x6f, + 0x72, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x48, 0x00, 0x52, 0x19, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x63, 0x61, 0x6c, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x45, 0x0a, + 0x0d, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4f, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 
0x49, 0x6e, + 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x22, 0x6a, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, + 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x63, 0x0a, + 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x35, 0x0a, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x48, 0x00, 0x52, 0x12, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x68, 0x0a, 0x16, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x14, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x12, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x88, 0x01, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x6c, 0x61, + 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5e, 0x0a, 0x17, + 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, + 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x32, 0xc8, 0x01, 0x0a, + 0x0b, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x09, + 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x22, 0x2e, 0x61, 0x70, 0x74, 0x6f, + 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, + 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x26, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xd1, 0x01, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, + 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x61, 0x70, 0x74, 0x6f, + 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x26, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x83, 0x01, 0x0a, 0x14, + 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x72, 0x2e, 0x76, 0x31, 0x42, 0x09, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0xa2, 0x02, 0x03, 0x41, 0x49, 0x58, 0xaa, 0x02, 0x10, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x2e, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x10, 0x41, 0x70, 0x74, + 0x6f, 0x73, 0x5c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1c, + 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x5c, 0x56, 0x31, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x41, + 0x70, 0x74, 0x6f, 0x73, 0x3a, 0x3a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x3a, 0x3a, 0x56, + 0x31, 0x4a, 0x9a, 0x10, 0x0a, 0x06, 0x12, 0x04, 0x03, 0x00, 0x48, 0x01, 0x0a, 0x4e, 0x0a, 0x01, + 0x0c, 0x12, 0x03, 0x03, 0x00, 0x12, 0x32, 0x44, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, + 0x68, 0x74, 0x20, 0xc2, 0xa9, 0x20, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x20, 0x46, 0x6f, 0x75, 0x6e, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x53, 0x50, 0x44, 0x58, 0x2d, 0x4c, 0x69, 0x63, + 0x65, 0x6e, 0x73, 0x65, 0x2d, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, + 0x20, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2d, 0x32, 0x2e, 0x30, 0x0a, 0x0a, 0x08, 0x0a, 0x01, + 0x02, 0x12, 0x03, 0x05, 0x00, 0x19, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x07, 0x00, + 0x29, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x08, 0x00, 0x30, 0x0a, 0x09, 0x0a, 0x02, + 0x03, 0x02, 0x12, 0x03, 0x09, 0x00, 0x2e, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0b, + 0x00, 0x0f, 0x01, 0x0a, 0x0a, 0x0a, 
0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x08, 0x14, 0x0a, + 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0c, 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x00, 0x04, 0x12, 0x03, 0x0c, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x00, 0x05, 0x12, 0x03, 0x0c, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x0c, 0x12, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, + 0x03, 0x0c, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0d, 0x02, + 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x04, 0x12, 0x03, 0x0d, 0x02, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x0d, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0d, 0x12, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0d, 0x24, 0x25, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, + 0x02, 0x12, 0x03, 0x0e, 0x02, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x04, 0x12, + 0x03, 0x0e, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0e, + 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x12, 0x1d, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x0e, 0x20, 0x21, 0x0a, 0x0a, + 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x11, 0x00, 0x13, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, + 0x01, 0x12, 0x03, 0x11, 0x08, 0x12, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, + 0x12, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x04, 0x12, 0x03, 0x12, 0x02, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x12, 0x0b, 0x17, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x12, 0x18, 0x26, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x12, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, + 0x02, 0x12, 0x04, 0x15, 0x00, 0x19, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, + 0x15, 0x08, 0x17, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x16, 0x04, 0x3a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x04, 0x12, 0x03, 0x16, 0x04, 0x0c, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x06, 0x12, 0x03, 0x16, 0x0d, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x16, 0x2c, 0x35, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x16, 0x38, 0x39, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x17, 0x04, 0x2d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x04, 0x12, 0x03, + 0x17, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x05, 0x12, 0x03, 0x17, 0x0d, + 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, 0x17, 0x14, 0x28, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, 0x17, 0x2b, 0x2c, 0x0a, 0x0b, 0x0a, + 0x04, 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, 0x18, 0x04, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x02, 0x04, 0x12, 0x03, 0x18, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, + 0x06, 0x12, 0x03, 0x18, 0x0d, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, 0x12, + 0x03, 0x18, 0x18, 0x23, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, 0x18, + 0x26, 0x27, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x1b, 0x00, 0x1e, 0x01, 0x0a, 0x0a, + 0x0a, 0x03, 0x04, 0x03, 0x01, 0x12, 0x03, 0x1b, 0x08, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, + 0x02, 0x00, 0x12, 0x03, 0x1c, 0x04, 0x3a, 0x0a, 0x0c, 0x0a, 
0x05, 0x04, 0x03, 0x02, 0x00, 0x04, + 0x12, 0x03, 0x1c, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, + 0x1c, 0x0d, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x1c, 0x2c, + 0x35, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x1c, 0x38, 0x39, 0x0a, + 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x01, 0x12, 0x03, 0x1d, 0x04, 0x2d, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x03, 0x02, 0x01, 0x04, 0x12, 0x03, 0x1d, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, + 0x02, 0x01, 0x05, 0x12, 0x03, 0x1d, 0x0d, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, + 0x01, 0x12, 0x03, 0x1d, 0x14, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, + 0x03, 0x1d, 0x2b, 0x2c, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x04, 0x12, 0x04, 0x20, 0x00, 0x24, 0x01, + 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, 0x20, 0x08, 0x17, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x04, 0x02, 0x00, 0x12, 0x03, 0x21, 0x04, 0x3a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, + 0x00, 0x04, 0x12, 0x03, 0x21, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x06, + 0x12, 0x03, 0x21, 0x0d, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x01, 0x12, 0x03, + 0x21, 0x2c, 0x35, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x03, 0x12, 0x03, 0x21, 0x38, + 0x39, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x01, 0x12, 0x03, 0x22, 0x04, 0x2d, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x04, 0x12, 0x03, 0x22, 0x04, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x04, 0x02, 0x01, 0x05, 0x12, 0x03, 0x22, 0x0d, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, + 0x02, 0x01, 0x01, 0x12, 0x03, 0x22, 0x14, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, + 0x03, 0x12, 0x03, 0x22, 0x2b, 0x2c, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x02, 0x12, 0x03, + 0x23, 0x04, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x04, 0x12, 0x03, 0x23, 0x04, + 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x05, 0x12, 0x03, 0x23, 0x0d, 0x13, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x23, 0x14, 0x22, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, 0x23, 0x25, 0x26, 0x0a, 0x0a, 0x0a, 0x02, 0x04, + 0x05, 0x12, 0x04, 0x26, 0x00, 0x2e, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, 0x03, + 0x26, 0x08, 0x13, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, 0x27, 0x02, 0x1e, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x04, 0x12, 0x03, 0x27, 0x02, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x05, 0x12, 0x03, 0x27, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, 0x03, 0x27, 0x12, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x27, 0x1c, 0x1d, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x05, 0x08, 0x00, + 0x12, 0x04, 0x28, 0x02, 0x2d, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x08, 0x00, 0x01, 0x12, + 0x03, 0x28, 0x08, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x01, 0x12, 0x03, 0x29, 0x06, + 0x31, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x06, 0x12, 0x03, 0x29, 0x06, 0x15, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x01, 0x12, 0x03, 0x29, 0x16, 0x2c, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x05, 0x02, 0x01, 0x03, 0x12, 0x03, 0x29, 0x2f, 0x30, 0x0a, 0x0b, 0x0a, 0x04, 0x04, + 0x05, 0x02, 0x02, 0x12, 0x03, 0x2a, 0x06, 0x37, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, + 0x06, 0x12, 0x03, 0x2a, 0x06, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x01, 0x12, + 0x03, 0x2a, 0x16, 0x32, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x02, 0x03, 0x12, 
0x03, 0x2a, + 0x35, 0x36, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x03, 0x12, 0x03, 0x2b, 0x06, 0x25, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x03, 0x06, 0x12, 0x03, 0x2b, 0x06, 0x12, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x05, 0x02, 0x03, 0x01, 0x12, 0x03, 0x2b, 0x13, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x05, 0x02, 0x03, 0x03, 0x12, 0x03, 0x2b, 0x23, 0x24, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, + 0x04, 0x12, 0x03, 0x2c, 0x06, 0x2c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x04, 0x06, 0x12, + 0x03, 0x2c, 0x06, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x04, 0x01, 0x12, 0x03, 0x2c, + 0x16, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x04, 0x03, 0x12, 0x03, 0x2c, 0x2a, 0x2b, + 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x06, 0x12, 0x04, 0x30, 0x00, 0x32, 0x01, 0x0a, 0x0a, 0x0a, 0x03, + 0x04, 0x06, 0x01, 0x12, 0x03, 0x30, 0x08, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x00, + 0x12, 0x03, 0x31, 0x02, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x04, 0x12, 0x03, + 0x31, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x06, 0x12, 0x03, 0x31, 0x0b, + 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x01, 0x12, 0x03, 0x31, 0x17, 0x23, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, 0x12, 0x03, 0x31, 0x26, 0x27, 0x0a, 0x0a, 0x0a, + 0x02, 0x04, 0x07, 0x12, 0x04, 0x34, 0x00, 0x36, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x07, 0x01, + 0x12, 0x03, 0x34, 0x08, 0x19, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x00, 0x12, 0x03, 0x35, + 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x04, 0x12, 0x03, 0x35, 0x02, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x05, 0x12, 0x03, 0x35, 0x0b, 0x11, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x01, 0x12, 0x03, 0x35, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x07, 0x02, 0x00, 0x03, 0x12, 0x03, 0x35, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x08, + 0x12, 0x04, 0x38, 0x00, 0x3a, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x08, 0x01, 0x12, 0x03, 0x38, + 0x08, 0x1e, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x00, 0x12, 0x03, 0x39, 0x02, 0x2b, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x04, 0x12, 0x03, 0x39, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x08, 0x02, 0x00, 0x05, 0x12, 0x03, 0x39, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x08, 0x02, 0x00, 0x01, 0x12, 0x03, 0x39, 0x12, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x08, 0x02, + 0x00, 0x03, 0x12, 0x03, 0x39, 0x29, 0x2a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x09, 0x12, 0x04, 0x3c, + 0x00, 0x3e, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x09, 0x01, 0x12, 0x03, 0x3c, 0x08, 0x1f, 0x0a, + 0x0b, 0x0a, 0x04, 0x04, 0x09, 0x02, 0x00, 0x12, 0x03, 0x3d, 0x02, 0x24, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x09, 0x02, 0x00, 0x04, 0x12, 0x03, 0x3d, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x09, + 0x02, 0x00, 0x06, 0x12, 0x03, 0x3d, 0x0b, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x3d, 0x1b, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x03, 0x12, + 0x03, 0x3d, 0x22, 0x23, 0x0a, 0x0a, 0x0a, 0x02, 0x06, 0x00, 0x12, 0x04, 0x40, 0x00, 0x43, 0x01, + 0x0a, 0x0a, 0x0a, 0x03, 0x06, 0x00, 0x01, 0x12, 0x03, 0x40, 0x08, 0x13, 0x0a, 0x0b, 0x0a, 0x04, + 0x06, 0x00, 0x02, 0x00, 0x12, 0x03, 0x41, 0x04, 0x40, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, + 0x00, 0x01, 0x12, 0x03, 0x41, 0x08, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x02, + 0x12, 0x03, 0x41, 0x12, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, + 0x41, 0x2d, 0x3e, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x01, 0x12, 0x03, 0x42, 0x04, 0x4f, + 0x0a, 0x0c, 
0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x42, 0x08, 0x17, 0x0a, 0x0c, + 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x42, 0x18, 0x2e, 0x0a, 0x0c, 0x0a, 0x05, + 0x06, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x42, 0x39, 0x4d, 0x0a, 0x0a, 0x0a, 0x02, 0x06, 0x01, + 0x12, 0x04, 0x45, 0x00, 0x48, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x06, 0x01, 0x01, 0x12, 0x03, 0x45, + 0x08, 0x13, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x01, 0x02, 0x00, 0x12, 0x03, 0x46, 0x02, 0x45, 0x0a, + 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x46, 0x06, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x01, 0x02, 0x00, 0x02, 0x12, 0x03, 0x46, 0x0b, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x06, + 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x46, 0x2c, 0x43, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x01, 0x02, + 0x01, 0x12, 0x03, 0x47, 0x02, 0x54, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x01, 0x01, 0x12, + 0x03, 0x47, 0x06, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x01, 0x02, 0x12, 0x03, 0x47, + 0x16, 0x2c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x47, 0x37, 0x3d, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x47, 0x3e, 0x52, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ]; include!("aptos.indexer.v1.serde.rs"); include!("aptos.indexer.v1.tonic.rs"); diff --git a/protos/rust/src/pb/aptos.indexer.v1.serde.rs b/protos/rust/src/pb/aptos.indexer.v1.serde.rs index f77e4d406fad9..86ab56ebead8d 100644 --- a/protos/rust/src/pb/aptos.indexer.v1.serde.rs +++ b/protos/rust/src/pb/aptos.indexer.v1.serde.rs @@ -2,6 +2,377 @@ // SPDX-License-Identifier: Apache-2.0 // @generated +impl serde::Serialize for ActiveStream { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.id.is_some() { + len += 1; + } + if self.current_version.is_some() { + len += 1; + } + if self.end_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.ActiveStream", len)?; + if let Some(v) = self.id.as_ref() { + struct_ser.serialize_field("id", v)?; + } + if let Some(v) = self.current_version.as_ref() { + struct_ser.serialize_field("currentVersion", ToString::to_string(&v).as_str())?; + } + if let Some(v) = self.end_version.as_ref() { + struct_ser.serialize_field("endVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for ActiveStream { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "id", + "current_version", + "currentVersion", + "end_version", + "endVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Id, + CurrentVersion, + EndVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "id" => Ok(GeneratedField::Id), + "currentVersion" | "current_version" => Ok(GeneratedField::CurrentVersion), + "endVersion" | "end_version" => 
Ok(GeneratedField::EndVersion), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ActiveStream; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.ActiveStream") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut id__ = None; + let mut current_version__ = None; + let mut end_version__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::Id => { + if id__.is_some() { + return Err(serde::de::Error::duplicate_field("id")); + } + id__ = map.next_value()?; + } + GeneratedField::CurrentVersion => { + if current_version__.is_some() { + return Err(serde::de::Error::duplicate_field("currentVersion")); + } + current_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + GeneratedField::EndVersion => { + if end_version__.is_some() { + return Err(serde::de::Error::duplicate_field("endVersion")); + } + end_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(ActiveStream { + id: id__, + current_version: current_version__, + end_version: end_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.ActiveStream", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for DataServiceInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.timestamp.is_some() { + len += 1; + } + if self.known_latest_version.is_some() { + len += 1; + } + if self.stream_info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.DataServiceInfo", len)?; + if let Some(v) = self.timestamp.as_ref() { + struct_ser.serialize_field("timestamp", v)?; + } + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + if let Some(v) = self.stream_info.as_ref() { + struct_ser.serialize_field("streamInfo", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for DataServiceInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "timestamp", + "known_latest_version", + "knownLatestVersion", + "stream_info", + "streamInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Timestamp, + KnownLatestVersion, + StreamInfo, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "timestamp" => Ok(GeneratedField::Timestamp), + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + "streamInfo" | 
"stream_info" => Ok(GeneratedField::StreamInfo), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = DataServiceInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.DataServiceInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut timestamp__ = None; + let mut known_latest_version__ = None; + let mut stream_info__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::Timestamp => { + if timestamp__.is_some() { + return Err(serde::de::Error::duplicate_field("timestamp")); + } + timestamp__ = map.next_value()?; + } + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + GeneratedField::StreamInfo => { + if stream_info__.is_some() { + return Err(serde::de::Error::duplicate_field("streamInfo")); + } + stream_info__ = map.next_value()?; + } + } + } + Ok(DataServiceInfo { + timestamp: timestamp__, + known_latest_version: known_latest_version__, + stream_info: stream_info__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.DataServiceInfo", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for FullnodeInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.timestamp.is_some() { + len += 1; + } + if self.known_latest_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.FullnodeInfo", len)?; + if let Some(v) = self.timestamp.as_ref() { + struct_ser.serialize_field("timestamp", v)?; + } + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for FullnodeInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "timestamp", + "known_latest_version", + "knownLatestVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Timestamp, + KnownLatestVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "timestamp" => Ok(GeneratedField::Timestamp), + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> 
serde::de::Visitor<'de> for GeneratedVisitor { + type Value = FullnodeInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.FullnodeInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut timestamp__ = None; + let mut known_latest_version__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::Timestamp => { + if timestamp__.is_some() { + return Err(serde::de::Error::duplicate_field("timestamp")); + } + timestamp__ = map.next_value()?; + } + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(FullnodeInfo { + timestamp: timestamp__, + known_latest_version: known_latest_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.FullnodeInfo", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for GetTransactionsRequest { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result @@ -136,6 +507,754 @@ impl<'de> serde::Deserialize<'de> for GetTransactionsRequest { deserializer.deserialize_struct("aptos.indexer.v1.GetTransactionsRequest", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for GrpcManagerInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.timestamp.is_some() { + len += 1; + } + if self.known_latest_version.is_some() { + len += 1; + } + if self.master_address.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.GrpcManagerInfo", len)?; + if let Some(v) = self.timestamp.as_ref() { + struct_ser.serialize_field("timestamp", v)?; + } + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + if let Some(v) = self.master_address.as_ref() { + struct_ser.serialize_field("masterAddress", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for GrpcManagerInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "timestamp", + "known_latest_version", + "knownLatestVersion", + "master_address", + "masterAddress", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Timestamp, + KnownLatestVersion, + MasterAddress, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "timestamp" => Ok(GeneratedField::Timestamp), + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + "masterAddress" | "master_address" => Ok(GeneratedField::MasterAddress), + _ => 
Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GrpcManagerInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.GrpcManagerInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut timestamp__ = None; + let mut known_latest_version__ = None; + let mut master_address__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::Timestamp => { + if timestamp__.is_some() { + return Err(serde::de::Error::duplicate_field("timestamp")); + } + timestamp__ = map.next_value()?; + } + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + GeneratedField::MasterAddress => { + if master_address__.is_some() { + return Err(serde::de::Error::duplicate_field("masterAddress")); + } + master_address__ = map.next_value()?; + } + } + } + Ok(GrpcManagerInfo { + timestamp: timestamp__, + known_latest_version: known_latest_version__, + master_address: master_address__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.GrpcManagerInfo", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for HeartbeatRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.service_info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.HeartbeatRequest", len)?; + if let Some(v) = self.service_info.as_ref() { + struct_ser.serialize_field("serviceInfo", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for HeartbeatRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "service_info", + "serviceInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + ServiceInfo, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "serviceInfo" | "service_info" => Ok(GeneratedField::ServiceInfo), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = HeartbeatRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.HeartbeatRequest") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut 
service_info__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::ServiceInfo => { + if service_info__.is_some() { + return Err(serde::de::Error::duplicate_field("serviceInfo")); + } + service_info__ = map.next_value()?; + } + } + } + Ok(HeartbeatRequest { + service_info: service_info__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.HeartbeatRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for HeartbeatResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.known_latest_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.HeartbeatResponse", len)?; + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for HeartbeatResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "known_latest_version", + "knownLatestVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + KnownLatestVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = HeartbeatResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.HeartbeatResponse") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut known_latest_version__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(HeartbeatResponse { + known_latest_version: known_latest_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.HeartbeatResponse", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for PingDataServiceRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.known_latest_version.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.PingDataServiceRequest", len)?; + if let Some(v) = self.known_latest_version.as_ref() { + struct_ser.serialize_field("knownLatestVersion", ToString::to_string(&v).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PingDataServiceRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "known_latest_version", + "knownLatestVersion", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + KnownLatestVersion, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "knownLatestVersion" | "known_latest_version" => Ok(GeneratedField::KnownLatestVersion), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PingDataServiceRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.PingDataServiceRequest") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut known_latest_version__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::KnownLatestVersion => { + if known_latest_version__.is_some() { + return Err(serde::de::Error::duplicate_field("knownLatestVersion")); + } + known_latest_version__ = + map.next_value::<::std::option::Option<::pbjson::private::NumberDeserialize<_>>>()?.map(|x| x.0) + ; + } + } + } + Ok(PingDataServiceRequest { + known_latest_version: known_latest_version__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.PingDataServiceRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for PingDataServiceResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.PingDataServiceResponse", len)?; + if let Some(v) = self.info.as_ref() { + struct_ser.serialize_field("info", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PingDataServiceResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "info", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Info, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "info" => Ok(GeneratedField::Info), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PingDataServiceResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.PingDataServiceResponse") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut info__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::Info => { + if info__.is_some() { + return Err(serde::de::Error::duplicate_field("info")); + } + info__ = map.next_value()?; + } + } + } + Ok(PingDataServiceResponse { + info: info__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.PingDataServiceResponse", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for ServiceInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.address.is_some() { + len += 1; + } + if self.service_type.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.ServiceInfo", len)?; + if let Some(v) = self.address.as_ref() { + struct_ser.serialize_field("address", v)?; + } + if let Some(v) = self.service_type.as_ref() { + match v { + service_info::ServiceType::LiveDataServiceInfo(v) => { + struct_ser.serialize_field("liveDataServiceInfo", v)?; + } + service_info::ServiceType::HistoricalDataServiceInfo(v) => { + struct_ser.serialize_field("historicalDataServiceInfo", v)?; + } + service_info::ServiceType::FullnodeInfo(v) => { + struct_ser.serialize_field("fullnodeInfo", v)?; + } + service_info::ServiceType::GrpcManagerInfo(v) => { + struct_ser.serialize_field("grpcManagerInfo", v)?; + } + } + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for ServiceInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "address", + "live_data_service_info", + "liveDataServiceInfo", + "historical_data_service_info", + "historicalDataServiceInfo", + "fullnode_info", + "fullnodeInfo", + "grpc_manager_info", + "grpcManagerInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Address, + LiveDataServiceInfo, + HistoricalDataServiceInfo, + FullnodeInfo, + GrpcManagerInfo, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "address" => Ok(GeneratedField::Address), + "liveDataServiceInfo" | "live_data_service_info" => Ok(GeneratedField::LiveDataServiceInfo), + "historicalDataServiceInfo" | "historical_data_service_info" => Ok(GeneratedField::HistoricalDataServiceInfo), + "fullnodeInfo" | "fullnode_info" => Ok(GeneratedField::FullnodeInfo), + "grpcManagerInfo" | "grpc_manager_info" => Ok(GeneratedField::GrpcManagerInfo), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ServiceInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.ServiceInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut address__ = None; + let mut service_type__ = None; + while let Some(k) = map.next_key()? 
{ + match k { + GeneratedField::Address => { + if address__.is_some() { + return Err(serde::de::Error::duplicate_field("address")); + } + address__ = map.next_value()?; + } + GeneratedField::LiveDataServiceInfo => { + if service_type__.is_some() { + return Err(serde::de::Error::duplicate_field("liveDataServiceInfo")); + } + service_type__ = map.next_value::<::std::option::Option<_>>()?.map(service_info::ServiceType::LiveDataServiceInfo) +; + } + GeneratedField::HistoricalDataServiceInfo => { + if service_type__.is_some() { + return Err(serde::de::Error::duplicate_field("historicalDataServiceInfo")); + } + service_type__ = map.next_value::<::std::option::Option<_>>()?.map(service_info::ServiceType::HistoricalDataServiceInfo) +; + } + GeneratedField::FullnodeInfo => { + if service_type__.is_some() { + return Err(serde::de::Error::duplicate_field("fullnodeInfo")); + } + service_type__ = map.next_value::<::std::option::Option<_>>()?.map(service_info::ServiceType::FullnodeInfo) +; + } + GeneratedField::GrpcManagerInfo => { + if service_type__.is_some() { + return Err(serde::de::Error::duplicate_field("grpcManagerInfo")); + } + service_type__ = map.next_value::<::std::option::Option<_>>()?.map(service_info::ServiceType::GrpcManagerInfo) +; + } + } + } + Ok(ServiceInfo { + address: address__, + service_type: service_type__, + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.ServiceInfo", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for StreamInfo { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.active_streams.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.indexer.v1.StreamInfo", len)?; + if !self.active_streams.is_empty() { + struct_ser.serialize_field("activeStreams", &self.active_streams)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for StreamInfo { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "active_streams", + "activeStreams", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + ActiveStreams, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "activeStreams" | "active_streams" => Ok(GeneratedField::ActiveStreams), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = StreamInfo; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.indexer.v1.StreamInfo") + } + + fn visit_map(self, mut map: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut active_streams__ = None; + while let Some(k) = map.next_key()? 
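The serde impls above are pbjson output implementing the proto3 JSON mapping for the new grpc.proto messages: fields are keyed by lowerCamelCase (snake_case is also accepted on input), and 64-bit integers travel as JSON strings, which is why `known_latest_version` goes through `ToString` on the way out. A minimal round-trip sketch, assuming the regenerated `aptos-protos` crate and `serde_json` are available:

```rust
// Hedged sketch, not part of the patch. Assumes the regenerated types are
// exposed under aptos_protos::indexer::v1.
use aptos_protos::indexer::v1::PingDataServiceRequest;

fn main() -> serde_json::Result<()> {
    let req = PingDataServiceRequest {
        known_latest_version: Some(42),
    };
    let json = serde_json::to_string(&req)?;
    // uint64 fields are emitted as strings under lowerCamelCase keys.
    assert_eq!(json, r#"{"knownLatestVersion":"42"}"#);
    // Input accepts either "knownLatestVersion" or "known_latest_version",
    // per the FIELDS table in the generated deserializer above.
    let back: PingDataServiceRequest = serde_json::from_str(&json)?;
    assert_eq!(back, req);
    Ok(())
}
```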
{ + match k { + GeneratedField::ActiveStreams => { + if active_streams__.is_some() { + return Err(serde::de::Error::duplicate_field("activeStreams")); + } + active_streams__ = Some(map.next_value()?); + } + } + } + Ok(StreamInfo { + active_streams: active_streams__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("aptos.indexer.v1.StreamInfo", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for TransactionsInStorage { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result diff --git a/protos/rust/src/pb/aptos.indexer.v1.tonic.rs b/protos/rust/src/pb/aptos.indexer.v1.tonic.rs index 3055af3b2a2e3..d991d8eb6cbce 100644 --- a/protos/rust/src/pb/aptos.indexer.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.indexer.v1.tonic.rs @@ -4,7 +4,13 @@ // @generated /// Generated client implementations. pub mod raw_data_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod raw_data_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod raw_data_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { RawDataClient::new(InterceptedService::new(inner, interceptor)) } @@ -101,8 +107,7 @@ pub mod raw_data_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -119,16 +124,22 @@ pub mod raw_data_client { } /// Generated server implementations. pub mod raw_data_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with RawDataServer. #[async_trait] - pub trait RawData: Send + Sync + 'static { + pub trait RawData: std::marker::Send + std::marker::Sync + 'static { /// Server streaming response type for the GetTransactions method. - type GetTransactionsStream: futures_core::Stream< + type GetTransactionsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; /** Get transactions batch without any filtering from starting version and end if transaction count is present. 
*/ @@ -142,20 +153,18 @@ pub mod raw_data_server { } /// #[derive(Debug)] - pub struct RawDataServer { - inner: _Inner, + pub struct RawDataServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl RawDataServer { + impl RawDataServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -205,8 +214,8 @@ pub mod raw_data_server { impl tonic::codegen::Service> for RawDataServer where T: RawData, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -218,7 +227,6 @@ pub mod raw_data_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/aptos.indexer.v1.RawData/GetTransactions" => { #[allow(non_camel_case_types)] @@ -240,7 +248,7 @@ pub mod raw_data_server { ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - (*inner).get_transactions(request).await + ::get_transactions(&inner, request).await }; Box::pin(fut) } @@ -251,7 +259,6 @@ pub mod raw_data_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTransactionsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -270,20 +277,25 @@ pub mod raw_data_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for RawDataServer { + impl Clone for RawDataServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -295,17 +307,779 @@ pub mod raw_data_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.RawData"; + impl tonic::server::NamedService for RawDataServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod grpc_manager_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// + #[derive(Debug, Clone)] + pub struct GrpcManagerClient { + inner: tonic::client::Grpc, + } + impl GrpcManagerClient { + /// Attempt to create a new client by connecting to a given endpoint. 
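The raw_data_server rewrite above is the tonic 0.11 → 0.12 codegen change: the private `_Inner` newtype is replaced by a plain `Arc<T>`, dispatch goes through the fully qualified `<T as RawData>::get_transactions`, and the stream bound moves from `futures_core::Stream` to `tonic::codegen::tokio_stream::Stream`. Implementors are largely unaffected, since a pinned boxed stream satisfies either bound. A hedged sketch of a trivial implementation (the `StubRawData` type is invented here):

```rust
use aptos_protos::indexer::v1::{
    raw_data_server::RawData, GetTransactionsRequest, TransactionsResponse,
};
use futures::Stream;
use std::pin::Pin;

#[derive(Default)]
struct StubRawData;

#[tonic::async_trait]
impl RawData for StubRawData {
    // tokio_stream::Stream re-exports futures_core::Stream, so the usual
    // pinned boxed stream still satisfies the regenerated bound.
    type GetTransactionsStream = Pin<
        Box<dyn Stream<Item = Result<TransactionsResponse, tonic::Status>> + Send>,
    >;

    async fn get_transactions(
        &self,
        _request: tonic::Request<GetTransactionsRequest>,
    ) -> Result<tonic::Response<Self::GetTransactionsStream>, tonic::Status> {
        // One empty batch, then end of stream.
        let stream =
            futures::stream::iter(vec![Ok(TransactionsResponse::default())]);
        Ok(tonic::Response::new(Box::pin(stream)))
    }
}
```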
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> GrpcManagerClient<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::BoxBody>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> GrpcManagerClient<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
+        {
+            GrpcManagerClient::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
+        ///
+        /// Default: `usize::MAX`
+        #[must_use]
+        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_encoding_message_size(limit);
+            self
+        }
+        ///
+        pub async fn heartbeat(
+            &mut self,
+            request: impl tonic::IntoRequest<super::HeartbeatRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::HeartbeatResponse>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::unknown(
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/aptos.indexer.v1.GrpcManager/Heartbeat",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("aptos.indexer.v1.GrpcManager", "Heartbeat"));
+            self.inner.unary(req, path, codec).await
+        }
+        ///
+        pub async fn get_transactions(
+            &mut self,
+            request: impl tonic::IntoRequest<super::GetTransactionsRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::TransactionsResponse>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::unknown(
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/aptos.indexer.v1.GrpcManager/GetTransactions",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(
+                    GrpcMethod::new("aptos.indexer.v1.GrpcManager", "GetTransactions"),
+                );
+            self.inner.unary(req, path, codec).await
+        }
+    }
+}
+/// Generated server implementations.
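A hedged usage sketch for the new client (not part of the patch; the endpoint is a placeholder, and `HeartbeatRequest` is assumed to be the companion message defined in grpc.proto). Note that GrpcManager's GetTransactions is unary, unlike the server-streaming RPC of the same name on RawData and DataService:

```rust
use aptos_protos::indexer::v1::{
    grpc_manager_client::GrpcManagerClient, GetTransactionsRequest, HeartbeatRequest,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = GrpcManagerClient::connect("http://localhost:50051").await?;

    // Unary heartbeat against /aptos.indexer.v1.GrpcManager/Heartbeat.
    let _hb = client.heartbeat(HeartbeatRequest::default()).await?;

    // Unary fetch: a single TransactionsResponse, not a stream.
    let resp = client
        .get_transactions(GetTransactionsRequest {
            starting_version: Some(0),
            transactions_count: Some(100),
            batch_size: None,
        })
        .await?;
    println!("got {} transactions", resp.into_inner().transactions.len());
    Ok(())
}
```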
+pub mod grpc_manager_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with GrpcManagerServer. + #[async_trait] + pub trait GrpcManager: std::marker::Send + std::marker::Sync + 'static { + /// + async fn heartbeat( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// + async fn get_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// + #[derive(Debug)] + pub struct GrpcManagerServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl GrpcManagerServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self } } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) + impl tonic::codegen::Service> for GrpcManagerServer + where + T: GrpcManager, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/aptos.indexer.v1.GrpcManager/Heartbeat" => { + #[allow(non_camel_case_types)] + struct HeartbeatSvc(pub Arc); + impl< + T: GrpcManager, + > tonic::server::UnaryService + for HeartbeatSvc { + type Response = super::HeartbeatResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::heartbeat(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = HeartbeatSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/aptos.indexer.v1.GrpcManager/GetTransactions" => { + #[allow(non_camel_case_types)] + struct GetTransactionsSvc(pub Arc); + impl< + T: GrpcManager, + > tonic::server::UnaryService + for GetTransactionsSvc { + type Response = super::TransactionsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_transactions(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTransactionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + 
http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for GrpcManagerServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.GrpcManager"; + impl tonic::server::NamedService for GrpcManagerServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod data_service_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// + #[derive(Debug, Clone)] + pub struct DataServiceClient { + inner: tonic::client::Grpc, + } + impl DataServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DataServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DataServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + DataServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
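Before the DataService stubs below, a hedged sketch of mounting the GrpcManager server defined above (the `StubGrpcManager` type is invented here; since `NamedService` is wired to the exported `SERVICE_NAME`, routing by "aptos.indexer.v1.GrpcManager" is automatic):

```rust
use aptos_protos::indexer::v1::{
    grpc_manager_server::{GrpcManager, GrpcManagerServer},
    GetTransactionsRequest, HeartbeatRequest, HeartbeatResponse, TransactionsResponse,
};

#[derive(Default)]
struct StubGrpcManager;

#[tonic::async_trait]
impl GrpcManager for StubGrpcManager {
    async fn heartbeat(
        &self,
        _request: tonic::Request<HeartbeatRequest>,
    ) -> Result<tonic::Response<HeartbeatResponse>, tonic::Status> {
        Ok(tonic::Response::new(HeartbeatResponse::default()))
    }

    async fn get_transactions(
        &self,
        _request: tonic::Request<GetTransactionsRequest>,
    ) -> Result<tonic::Response<TransactionsResponse>, tonic::Status> {
        Ok(tonic::Response::new(TransactionsResponse::default()))
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic::transport::Server::builder()
        // add_service accepts the generated server because of NamedService.
        .add_service(GrpcManagerServer::new(StubGrpcManager))
        .serve("0.0.0.0:50052".parse()?)
        .await?;
    Ok(())
}
```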
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// + pub async fn ping( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.indexer.v1.DataService/Ping", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("aptos.indexer.v1.DataService", "Ping")); + self.inner.unary(req, path, codec).await + } + /// + pub async fn get_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.indexer.v1.DataService/GetTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("aptos.indexer.v1.DataService", "GetTransactions"), + ); + self.inner.server_streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod data_service_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with DataServiceServer. + #[async_trait] + pub trait DataService: std::marker::Send + std::marker::Sync + 'static { + /// + async fn ping( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the GetTransactions method. + type GetTransactionsStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// + async fn get_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// + #[derive(Debug)] + pub struct DataServiceServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl DataServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
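DataService pairs a unary Ping with a server-streaming GetTransactions. A hedged consumer sketch (the endpoint is a placeholder; `tokio_stream::StreamExt::next` drains the `tonic::codec::Streaming` returned by `into_inner`):

```rust
use aptos_protos::indexer::v1::{
    data_service_client::DataServiceClient, GetTransactionsRequest,
};
use tokio_stream::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = DataServiceClient::connect("http://localhost:50053").await?;
    let mut stream = client
        .get_transactions(GetTransactionsRequest {
            starting_version: Some(0),
            // Per the RawData doc comment above, the stream only ends if a
            // transaction count is present.
            transactions_count: None,
            batch_size: None,
        })
        .await?
        .into_inner();
    while let Some(batch) = stream.next().await {
        // Each item is one TransactionsResponse; RPC errors surface as Status.
        let batch = batch?;
        println!("received {} transactions", batch.transactions.len());
    }
    Ok(())
}
```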
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for DataServiceServer + where + T: DataService, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/aptos.indexer.v1.DataService/Ping" => { + #[allow(non_camel_case_types)] + struct PingSvc(pub Arc); + impl< + T: DataService, + > tonic::server::UnaryService + for PingSvc { + type Response = super::PingDataServiceResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::ping(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = PingSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/aptos.indexer.v1.DataService/GetTransactions" => { + #[allow(non_camel_case_types)] + struct GetTransactionsSvc(pub Arc); + impl< + T: DataService, + > tonic::server::ServerStreamingService< + super::GetTransactionsRequest, + > for GetTransactionsSvc { + type Response = super::TransactionsResponse; + type ResponseStream = T::GetTransactionsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_transactions(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTransactionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + 
max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for DataServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } } } - impl tonic::server::NamedService for RawDataServer { - const NAME: &'static str = "aptos.indexer.v1.RawData"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.indexer.v1.DataService"; + impl tonic::server::NamedService for DataServiceServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/protos/rust/src/pb/aptos.internal.fullnode.v1.rs b/protos/rust/src/pb/aptos.internal.fullnode.v1.rs index 2a2fa4be3823c..f5547af9719d0 100644 --- a/protos/rust/src/pb/aptos.internal.fullnode.v1.rs +++ b/protos/rust/src/pb/aptos.internal.fullnode.v1.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // @generated +// This file is @generated by prost-build. // Transaction data is transferred via 1 stream with batches until terminated. // One stream consists: // StreamStatus: INIT with version x @@ -9,14 +10,12 @@ // TransactionOutput data(size n) // StreamStatus: BATCH_END with version x + (k + 1) * n - 1 -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionsOutput { #[prost(message, repeated, tag="1")] pub transactions: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct StreamStatus { #[prost(enumeration="stream_status::StatusType", tag="1")] pub r#type: i32, @@ -45,9 +44,9 @@ pub mod stream_status { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - StatusType::Unspecified => "STATUS_TYPE_UNSPECIFIED", - StatusType::Init => "STATUS_TYPE_INIT", - StatusType::BatchEnd => "STATUS_TYPE_BATCH_END", + Self::Unspecified => "STATUS_TYPE_UNSPECIFIED", + Self::Init => "STATUS_TYPE_INIT", + Self::BatchEnd => "STATUS_TYPE_BATCH_END", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -61,8 +60,7 @@ pub mod stream_status { } } } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetTransactionsFromNodeRequest { /// Required; start version of current stream. 
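The aptos.internal.fullnode.v1 changes above are mechanical prost-build updates: `Copy` is now derived where every field permits it, the blanket `clippy::derive_partial_eq_without_eq` allow is dropped, and enum helpers use `Self::` match arms; runtime behavior is unchanged. A tiny sketch of the (unchanged) helper pair, assuming the usual crate paths:

```rust
use aptos_protos::internal::fullnode::v1::stream_status::StatusType;

fn main() {
    let name = StatusType::BatchEnd.as_str_name();
    assert_eq!(name, "STATUS_TYPE_BATCH_END");
    // from_str_name is the inverse generated alongside as_str_name.
    assert_eq!(StatusType::from_str_name(name), Some(StatusType::BatchEnd));
}
```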
/// If not set will panic somewhere @@ -73,7 +71,6 @@ pub struct GetTransactionsFromNodeRequest { #[prost(uint64, optional, tag="2")] pub transactions_count: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionsFromNodeResponse { /// Making sure that all the responses include a chain id @@ -84,8 +81,7 @@ pub struct TransactionsFromNodeResponse { } /// Nested message and enum types in `TransactionsFromNodeResponse`. pub mod transactions_from_node_response { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Response { #[prost(message, tag="1")] Status(super::StreamStatus), @@ -93,199 +89,234 @@ pub mod transactions_from_node_response { Data(super::TransactionsOutput), } } +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PingFullnodeRequest { +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PingFullnodeResponse { + #[prost(message, optional, tag="1")] + pub info: ::core::option::Option, +} /// Encoded file descriptor set for the `aptos.internal.fullnode.v1` package pub const FILE_DESCRIPTOR_SET: &[u8] = &[ - 0x0a, 0xf1, 0x17, 0x0a, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x0a, 0xa1, 0x1b, 0x0a, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x26, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, - 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x92, 0x02, 0x0a, 0x0c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x47, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, 0x30, 
0x01, 0x48, 0x00, 0x52, 0x0a, - 0x65, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x22, 0x5a, 0x0a, - 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x10, 0x01, 0x12, 0x19, - 0x0a, 0x15, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, - 0x54, 0x43, 0x48, 0x5f, 0x45, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x65, 0x6e, - 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb8, 0x01, 0x0a, 0x1e, 0x47, 0x65, - 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x72, 0x6f, - 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x10, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, 0x30, 0x01, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, - 0x12, 0x36, 0x0a, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, 0x30, 0x01, - 0x48, 0x01, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x15, 0x0a, - 0x13, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xcf, 0x01, 0x0a, 0x1c, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, - 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x48, 0x00, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x46, 0x75, 0x6c, 0x6c, 0x6e, - 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x91, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4e, - 0x6f, 0x64, 0x65, 0x12, 0x3a, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 
0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x38, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x92, 0x02, 0x0a, 0x0c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x47, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x33, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x28, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, 0x30, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x6e, 0x64, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x22, 0x5a, 0x0a, 0x0a, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, + 0x5f, 0x45, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb8, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x10, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x42, 0x02, 0x30, 0x01, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, + 0x12, 0x74, 0x72, 
0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, 0x30, 0x01, 0x48, 0x01, 0x52, + 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x88, 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0xcf, 0x01, 0x0a, 0x1c, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x48, 0x00, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x50, 0x69, 0x6e, 0x67, 0x46, 0x75, 0x6c, 0x6c, 0x6e, + 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x58, 0x0a, 0x14, 0x50, 0x69, + 0x6e, 0x67, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x48, 0x00, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x32, 0x8d, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x69, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x2e, + 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, + 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x46, + 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, + 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, + 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x91, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x3a, 0x2e, 0x61, + 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, + 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0xbe, 0x01, 0x0a, 0x1e, - 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, - 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0xa2, 0x02, 0x03, 0x41, 0x49, 0x46, 0xaa, 0x02, 0x1a, 0x41, 0x70, 0x74, 0x6f, - 0x73, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x1a, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, - 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x26, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x5c, 0x56, 0x31, - 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1d, 0x41, - 0x70, 0x74, 0x6f, 0x73, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, - 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x4a, 0x8d, 0x0e, 0x0a, - 0x06, 0x12, 0x04, 0x03, 0x00, 0x38, 0x01, 0x0a, 0x4e, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x03, 0x00, - 0x12, 0x32, 0x44, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0xc2, 0xa9, - 0x20, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x0a, 0x20, 0x53, 0x50, 0x44, 0x58, 0x2d, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x2d, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, 0x20, 0x41, 0x70, 0x61, 0x63, - 0x68, 0x65, 0x2d, 0x32, 0x2e, 0x30, 0x0a, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x05, 0x00, - 0x23, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x07, 0x00, 0x30, 0x0a, 0xfe, 0x01, 0x0a, - 0x02, 0x04, 0x00, 0x12, 0x04, 0x10, 0x00, 0x12, 0x01, 0x32, 0xf1, 0x01, 0x20, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x69, 0x73, - 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x20, 0x76, 0x69, 0x61, - 0x20, 0x31, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x62, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x20, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x20, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x4f, 0x6e, 0x65, 0x20, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x73, 0x3a, 0x0a, 0x20, - 0x20, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x20, 0x49, - 0x4e, 0x49, 0x54, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x20, 0x78, 0x0a, 0x20, 0x20, 0x6c, 0x6f, 0x6f, 0x70, 0x20, 0x6b, 0x3a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6e, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x3a, 0x20, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x45, 0x4e, 0x44, 
0x20, 0x77, 0x69, 0x74, 0x68, - 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x78, 0x20, 0x2b, 0x20, 0x28, 0x6b, 0x20, - 0x2b, 0x20, 0x31, 0x29, 0x20, 0x2a, 0x20, 0x6e, 0x20, 0x2d, 0x20, 0x31, 0x0a, 0x0a, 0x0a, 0x0a, - 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x10, 0x08, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, - 0x00, 0x12, 0x03, 0x11, 0x02, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x04, 0x12, - 0x03, 0x11, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x11, - 0x0b, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x11, 0x2c, 0x38, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x11, 0x3b, 0x3c, 0x0a, 0x0a, - 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x14, 0x00, 0x21, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, - 0x01, 0x12, 0x03, 0x14, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x01, 0x04, 0x00, 0x12, 0x04, - 0x15, 0x02, 0x1b, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x04, 0x00, 0x01, 0x12, 0x03, 0x15, - 0x07, 0x11, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x16, 0x04, - 0x20, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x01, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x16, 0x04, - 0x1b, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x01, 0x04, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x16, 0x1e, - 0x1f, 0x0a, 0x34, 0x0a, 0x06, 0x04, 0x01, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x18, 0x04, 0x19, - 0x1a, 0x25, 0x20, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x0a, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x01, 0x04, 0x00, 0x02, - 0x01, 0x01, 0x12, 0x03, 0x18, 0x04, 0x14, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x01, 0x04, 0x00, 0x02, - 0x01, 0x02, 0x12, 0x03, 0x18, 0x17, 0x18, 0x0a, 0x31, 0x0a, 0x06, 0x04, 0x01, 0x04, 0x00, 0x02, - 0x02, 0x12, 0x03, 0x1a, 0x04, 0x1e, 0x1a, 0x22, 0x20, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x20, - 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x64, 0x20, 0x6f, 0x66, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x62, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x0a, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x01, - 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x1a, 0x04, 0x19, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x01, - 0x04, 0x00, 0x02, 0x02, 0x02, 0x12, 0x03, 0x1a, 0x1c, 0x1d, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, - 0x02, 0x00, 0x12, 0x03, 0x1c, 0x02, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, - 0x12, 0x03, 0x1c, 0x02, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, - 0x1c, 0x0d, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x1c, 0x14, - 0x15, 0x0a, 0x4a, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x1e, 0x02, 0x1b, 0x1a, 0x3d, - 0x20, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x2e, 0x20, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x20, 0x62, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x2c, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x01, 0x02, 0x01, 0x05, 0x12, 0x03, 0x1e, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x1e, 0x09, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, - 0x01, 0x03, 0x12, 0x03, 0x1e, 0x19, 0x1a, 0x0a, 0x39, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x02, 0x12, - 0x03, 0x20, 0x02, 0x37, 0x1a, 0x2c, 0x20, 0x45, 0x6e, 0x64, 0x20, 0x76, 0x65, 0x72, 0x73, 
0x69, - 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x2a, 0x62, - 0x61, 0x74, 0x63, 0x68, 0x2a, 0x2c, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, - 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x04, 0x12, 0x03, 0x20, 0x02, 0x0a, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x05, 0x12, 0x03, 0x20, 0x0b, 0x11, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x20, 0x12, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, 0x03, 0x20, 0x20, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, - 0x02, 0x02, 0x08, 0x12, 0x03, 0x20, 0x22, 0x36, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x02, 0x02, - 0x08, 0x06, 0x12, 0x03, 0x20, 0x23, 0x35, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x23, - 0x00, 0x2b, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x23, 0x08, 0x26, 0x0a, - 0x5a, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x26, 0x02, 0x3c, 0x1a, 0x4d, 0x20, 0x52, - 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x3b, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, 0x76, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x61, 0x70, 0x74, 0x6f, 0x73, + 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x30, 0x01, 0x42, 0xbe, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x74, + 0x6f, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x66, 0x75, 0x6c, 0x6c, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xa2, 0x02, 0x03, 0x41, + 0x49, 0x46, 0xaa, 0x02, 0x1a, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x56, 0x31, 0xca, + 0x02, 0x1a, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x5c, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x26, 0x41, + 0x70, 0x74, 0x6f, 0x73, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x46, 0x75, + 0x6c, 0x6c, 0x6e, 0x6f, 0x64, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1d, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x3a, 0x3a, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x46, 0x75, 0x6c, 0x6c, 0x6e, 0x6f, 0x64, + 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x4a, 0xc4, 0x0f, 0x0a, 0x06, 0x12, 0x04, 0x03, 0x00, 0x41, 0x01, + 0x0a, 0x4e, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x03, 0x00, 0x12, 0x32, 0x44, 0x20, 0x43, 0x6f, 0x70, + 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0xc2, 0xa9, 0x20, 0x41, 0x70, 0x74, 0x6f, 0x73, 0x20, + 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x53, 0x50, 0x44, 0x58, + 0x2d, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x2d, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x3a, 0x20, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2d, 0x32, 0x2e, 0x30, 0x0a, + 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x05, 0x00, 0x23, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, + 0x12, 0x03, 0x07, 0x00, 0x30, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x08, 0x00, 0x25, + 0x0a, 0xfe, 0x01, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x11, 0x00, 0x13, 0x01, 0x32, 0xf1, 0x01, + 0x20, 0x54, 0x72, 
0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x64, 0x61, 0x74, + 0x61, 0x20, 0x69, 0x73, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, + 0x20, 0x76, 0x69, 0x61, 0x20, 0x31, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x20, 0x77, 0x69, + 0x74, 0x68, 0x20, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x20, 0x75, 0x6e, 0x74, 0x69, 0x6c, + 0x20, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x4f, 0x6e, + 0x65, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x3a, 0x20, 0x49, 0x4e, 0x49, 0x54, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x78, 0x0a, 0x20, 0x20, 0x6c, 0x6f, 0x6f, 0x70, 0x20, 0x6b, 0x3a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x28, 0x73, 0x69, 0x7a, 0x65, + 0x20, 0x6e, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x3a, 0x20, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x45, 0x4e, 0x44, 0x20, + 0x77, 0x69, 0x74, 0x68, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x78, 0x20, 0x2b, + 0x20, 0x28, 0x6b, 0x20, 0x2b, 0x20, 0x31, 0x29, 0x20, 0x2a, 0x20, 0x6e, 0x20, 0x2d, 0x20, 0x31, + 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x11, 0x08, 0x1a, 0x0a, 0x0b, 0x0a, + 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x12, 0x02, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x00, 0x04, 0x12, 0x03, 0x12, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, + 0x06, 0x12, 0x03, 0x12, 0x0b, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, + 0x03, 0x12, 0x2c, 0x38, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x12, + 0x3b, 0x3c, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x15, 0x00, 0x22, 0x01, 0x0a, 0x0a, + 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x15, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x01, + 0x04, 0x00, 0x12, 0x04, 0x16, 0x02, 0x1c, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x04, 0x00, + 0x01, 0x12, 0x03, 0x16, 0x07, 0x11, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x04, 0x00, 0x02, 0x00, + 0x12, 0x03, 0x17, 0x04, 0x20, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x01, 0x04, 0x00, 0x02, 0x00, 0x01, + 0x12, 0x03, 0x17, 0x04, 0x1b, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x01, 0x04, 0x00, 0x02, 0x00, 0x02, + 0x12, 0x03, 0x17, 0x1e, 0x1f, 0x0a, 0x34, 0x0a, 0x06, 0x04, 0x01, 0x04, 0x00, 0x02, 0x01, 0x12, + 0x03, 0x19, 0x04, 0x19, 0x1a, 0x25, 0x20, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x0a, 0x0a, 0x0e, 0x0a, 0x07, 0x04, + 0x01, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x19, 0x04, 0x14, 0x0a, 0x0e, 0x0a, 0x07, 0x04, + 0x01, 0x04, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x19, 0x17, 0x18, 0x0a, 0x31, 0x0a, 0x06, 0x04, + 0x01, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x1b, 0x04, 0x1e, 0x1a, 0x22, 0x20, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x64, 0x20, + 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x0a, 0x0a, 0x0e, + 0x0a, 0x07, 0x04, 0x01, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x1b, 0x04, 0x19, 0x0a, 0x0e, + 0x0a, 0x07, 0x04, 0x01, 0x04, 0x00, 0x02, 
0x02, 0x02, 0x12, 0x03, 0x1b, 0x1c, 0x1d, 0x0a, 0x0b, + 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x1d, 0x02, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x1d, 0x02, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x00, 0x01, 0x12, 0x03, 0x1d, 0x0d, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, + 0x12, 0x03, 0x1d, 0x14, 0x15, 0x0a, 0x4a, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x1f, + 0x02, 0x1b, 0x1a, 0x3d, 0x20, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x2e, 0x20, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x62, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x2c, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x2e, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x05, 0x12, 0x03, 0x1f, 0x02, 0x08, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x1f, 0x09, 0x16, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x1f, 0x19, 0x1a, 0x0a, 0x39, 0x0a, 0x04, 0x04, + 0x01, 0x02, 0x02, 0x12, 0x03, 0x21, 0x02, 0x37, 0x1a, 0x2c, 0x20, 0x45, 0x6e, 0x64, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x0a, 0x20, 0x49, 0x66, 0x20, 0x6e, 0x6f, - 0x74, 0x20, 0x73, 0x65, 0x74, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x70, 0x61, 0x6e, 0x69, 0x63, - 0x20, 0x73, 0x6f, 0x6d, 0x65, 0x77, 0x68, 0x65, 0x72, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x02, 0x02, 0x00, 0x04, 0x12, 0x03, 0x26, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, - 0x00, 0x05, 0x12, 0x03, 0x26, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, - 0x12, 0x03, 0x26, 0x12, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, - 0x26, 0x25, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x08, 0x12, 0x03, 0x26, 0x27, - 0x3b, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x02, 0x02, 0x00, 0x08, 0x06, 0x12, 0x03, 0x26, 0x28, 0x3a, - 0x0a, 0x76, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x2a, 0x02, 0x3e, 0x1a, 0x69, 0x20, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x3b, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x20, 0x6f, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x63, 0x75, + 0x74, 0x20, 0x2a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x2a, 0x2c, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x04, 0x12, + 0x03, 0x21, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x05, 0x12, 0x03, 0x21, + 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x21, 0x12, 0x1d, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, 0x03, 0x21, 0x20, 0x21, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x08, 0x12, 0x03, 0x21, 0x22, 0x36, 0x0a, 0x0d, 0x0a, 0x06, + 0x04, 0x01, 0x02, 0x02, 0x08, 0x06, 0x12, 0x03, 0x21, 0x23, 0x35, 0x0a, 0x0a, 0x0a, 0x02, 0x04, + 0x02, 0x12, 0x04, 0x24, 0x00, 0x2c, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, + 0x24, 0x08, 0x26, 0x0a, 0x5a, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x27, 0x02, 0x3c, + 0x1a, 0x4d, 0x20, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x3b, 0x20, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 
0x6f, 0x66, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x0a, 0x20, 0x49, - 0x66, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x73, 0x65, 0x74, 0x2c, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x20, 0x69, 0x6e, 0x66, 0x69, - 0x6e, 0x69, 0x74, 0x65, 0x6c, 0x79, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, - 0x04, 0x12, 0x03, 0x2a, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x05, 0x12, - 0x03, 0x2a, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, 0x2a, - 0x12, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, 0x2a, 0x27, 0x28, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x08, 0x12, 0x03, 0x2a, 0x29, 0x3d, 0x0a, 0x0d, - 0x0a, 0x06, 0x04, 0x02, 0x02, 0x01, 0x08, 0x06, 0x12, 0x03, 0x2a, 0x2a, 0x3c, 0x0a, 0x0a, 0x0a, - 0x02, 0x04, 0x03, 0x12, 0x04, 0x2d, 0x00, 0x34, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, 0x01, - 0x12, 0x03, 0x2d, 0x08, 0x24, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x03, 0x08, 0x00, 0x12, 0x04, 0x2e, - 0x02, 0x31, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x08, 0x00, 0x01, 0x12, 0x03, 0x2e, 0x08, - 0x10, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x2f, 0x04, 0x1c, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, 0x2f, 0x04, 0x10, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x2f, 0x11, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, - 0x02, 0x00, 0x03, 0x12, 0x03, 0x2f, 0x1a, 0x1b, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x01, - 0x12, 0x03, 0x30, 0x04, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x06, 0x12, 0x03, - 0x30, 0x04, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, 0x03, 0x30, 0x17, - 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, 0x30, 0x1e, 0x1f, 0x0a, - 0x44, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x02, 0x12, 0x03, 0x33, 0x02, 0x16, 0x1a, 0x37, 0x20, 0x4d, - 0x61, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x75, 0x72, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, - 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x73, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x61, 0x20, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x20, 0x69, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x05, 0x12, 0x03, - 0x33, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x01, 0x12, 0x03, 0x33, 0x09, - 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x03, 0x12, 0x03, 0x33, 0x14, 0x15, 0x0a, - 0x0a, 0x0a, 0x02, 0x06, 0x00, 0x12, 0x04, 0x36, 0x00, 0x38, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x06, - 0x00, 0x01, 0x12, 0x03, 0x36, 0x08, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x00, 0x12, - 0x03, 0x37, 0x02, 0x6c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x37, - 0x06, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x37, 0x1e, 0x3c, - 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x37, 0x47, 0x4d, 0x0a, 0x0c, - 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x37, 0x4e, 0x6a, 0x62, 0x06, 0x70, 0x72, + 0x66, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x73, 0x65, 0x74, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x70, + 0x61, 0x6e, 0x69, 0x63, 0x20, 0x73, 0x6f, 0x6d, 0x65, 0x77, 0x68, 0x65, 0x72, 0x65, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x04, 0x12, 0x03, 0x27, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, 0x03, 0x27, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 
0x04, + 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x27, 0x12, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x00, 0x03, 0x12, 0x03, 0x27, 0x25, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x08, + 0x12, 0x03, 0x27, 0x27, 0x3b, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x02, 0x02, 0x00, 0x08, 0x06, 0x12, + 0x03, 0x27, 0x28, 0x3a, 0x0a, 0x76, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x2b, 0x02, + 0x3e, 0x1a, 0x69, 0x20, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x3b, 0x20, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x69, + 0x6e, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x2e, 0x0a, 0x20, 0x49, 0x66, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x73, 0x65, 0x74, 0x2c, 0x20, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x20, + 0x69, 0x6e, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x6c, 0x79, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x01, 0x04, 0x12, 0x03, 0x2b, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x01, 0x05, 0x12, 0x03, 0x2b, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, + 0x01, 0x12, 0x03, 0x2b, 0x12, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, + 0x03, 0x2b, 0x27, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x08, 0x12, 0x03, 0x2b, + 0x29, 0x3d, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x02, 0x02, 0x01, 0x08, 0x06, 0x12, 0x03, 0x2b, 0x2a, + 0x3c, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x2e, 0x00, 0x35, 0x01, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x03, 0x01, 0x12, 0x03, 0x2e, 0x08, 0x24, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x03, 0x08, + 0x00, 0x12, 0x04, 0x2f, 0x02, 0x32, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x08, 0x00, 0x01, + 0x12, 0x03, 0x2f, 0x08, 0x10, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x30, + 0x04, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, 0x30, 0x04, 0x10, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x30, 0x11, 0x17, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x30, 0x1a, 0x1b, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x03, 0x02, 0x01, 0x12, 0x03, 0x31, 0x04, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x01, 0x06, 0x12, 0x03, 0x31, 0x04, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, + 0x12, 0x03, 0x31, 0x17, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, + 0x31, 0x1e, 0x1f, 0x0a, 0x44, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x02, 0x12, 0x03, 0x34, 0x02, 0x16, + 0x1a, 0x37, 0x20, 0x4d, 0x61, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x75, 0x72, 0x65, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x61, 0x20, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x20, 0x69, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x02, 0x05, 0x12, 0x03, 0x34, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x34, 0x09, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x03, 0x12, 0x03, + 0x34, 0x14, 0x15, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x04, 0x12, 0x04, 0x37, 0x00, 0x38, 0x01, 0x0a, + 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, 0x37, 0x08, 0x1b, 0x0a, 0x0a, 0x0a, 0x02, 0x04, + 0x05, 0x12, 0x04, 0x3a, 0x00, 0x3c, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, 0x03, + 0x3a, 0x08, 0x1c, 
0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, 0x3b, 0x04, 0x34, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x04, 0x12, 0x03, 0x3b, 0x04, 0x0c, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x06, 0x12, 0x03, 0x3b, 0x0d, 0x2a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, 0x03, 0x3b, 0x2b, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x3b, 0x32, 0x33, 0x0a, 0x0a, 0x0a, 0x02, 0x06, 0x00, 0x12, 0x04, + 0x3e, 0x00, 0x41, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x06, 0x00, 0x01, 0x12, 0x03, 0x3e, 0x08, 0x14, + 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x00, 0x12, 0x03, 0x3f, 0x02, 0x3f, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x3f, 0x06, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, + 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x3f, 0x0b, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, + 0x00, 0x03, 0x12, 0x03, 0x3f, 0x29, 0x3d, 0x0a, 0x0b, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x01, 0x12, + 0x03, 0x40, 0x02, 0x6c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x40, + 0x06, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x40, 0x1e, 0x3c, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x06, 0x12, 0x03, 0x40, 0x47, 0x4d, 0x0a, 0x0c, + 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x40, 0x4e, 0x6a, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ]; include!("aptos.internal.fullnode.v1.serde.rs"); diff --git a/protos/rust/src/pb/aptos.internal.fullnode.v1.serde.rs b/protos/rust/src/pb/aptos.internal.fullnode.v1.serde.rs index 089331b842e1b..419ea7b00398d 100644 --- a/protos/rust/src/pb/aptos.internal.fullnode.v1.serde.rs +++ b/protos/rust/src/pb/aptos.internal.fullnode.v1.serde.rs @@ -116,6 +116,168 @@ impl<'de> serde::Deserialize<'de> for GetTransactionsFromNodeRequest { deserializer.deserialize_struct("aptos.internal.fullnode.v1.GetTransactionsFromNodeRequest", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for PingFullnodeRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let len = 0; + let struct_ser = serializer.serialize_struct("aptos.internal.fullnode.v1.PingFullnodeRequest", len)?; + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PingFullnodeRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + Err(serde::de::Error::unknown_field(value, FIELDS)) + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PingFullnodeRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.internal.fullnode.v1.PingFullnodeRequest") + } + + fn visit_map(self, 
mut map: V) -> std::result::Result<PingFullnodeRequest, V::Error> + where + V: serde::de::MapAccess<'de>, + { + while map.next_key::<GeneratedField>()?.is_some() { + let _ = map.next_value::<serde::de::IgnoredAny>()?; + } + Ok(PingFullnodeRequest { + }) + } + } + deserializer.deserialize_struct("aptos.internal.fullnode.v1.PingFullnodeRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for PingFullnodeResponse { + #[allow(deprecated)] + fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("aptos.internal.fullnode.v1.PingFullnodeResponse", len)?; + if let Some(v) = self.info.as_ref() { + struct_ser.serialize_field("info", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PingFullnodeResponse { + #[allow(deprecated)] + fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "info", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Info, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error> + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E> + where + E: serde::de::Error, + { + match value { + "info" => Ok(GeneratedField::Info), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PingFullnodeResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct aptos.internal.fullnode.v1.PingFullnodeResponse") + } + + fn visit_map<V>(self, mut map: V) -> std::result::Result<PingFullnodeResponse, V::Error> + where + V: serde::de::MapAccess<'de>, + { + let mut info__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::Info => { + if info__.is_some() { + return Err(serde::de::Error::duplicate_field("info")); + } + info__ = map.next_value()?; + } + } + } + Ok(PingFullnodeResponse { + info: info__, + }) + } + } + deserializer.deserialize_struct("aptos.internal.fullnode.v1.PingFullnodeResponse", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for StreamStatus { #[allow(deprecated)] fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> diff --git a/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs b/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs index e95301b991017..f8cf73cd62ea8 100644 --- a/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.internal.fullnode.v1.tonic.rs @@ -4,7 +4,13 @@ // @generated /// Generated client implementations.
pub mod fullnode_data_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod fullnode_data_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod fullnode_data_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { FullnodeDataClient::new(InterceptedService::new(inner, interceptor)) } @@ -89,6 +95,33 @@ pub mod fullnode_data_client { self } /// + pub async fn ping( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/aptos.internal.fullnode.v1.FullnodeData/Ping", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("aptos.internal.fullnode.v1.FullnodeData", "Ping"), + ); + self.inner.unary(req, path, codec).await + } + /// pub async fn get_transactions_from_node( &mut self, request: impl tonic::IntoRequest, @@ -102,8 +135,7 @@ pub mod fullnode_data_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -125,19 +157,33 @@ pub mod fullnode_data_client { } /// Generated server implementations. pub mod fullnode_data_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with FullnodeDataServer. #[async_trait] - pub trait FullnodeData: Send + Sync + 'static { + pub trait FullnodeData: std::marker::Send + std::marker::Sync + 'static { + /// + async fn ping( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetTransactionsFromNode method. 
- type GetTransactionsFromNodeStream: futures_core::Stream< + type GetTransactionsFromNodeStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< super::TransactionsFromNodeResponse, tonic::Status, >, > - + Send + + std::marker::Send + 'static; /// async fn get_transactions_from_node( @@ -150,20 +196,18 @@ pub mod fullnode_data_server { } /// #[derive(Debug)] - pub struct FullnodeDataServer { - inner: _Inner, + pub struct FullnodeDataServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl FullnodeDataServer { + impl FullnodeDataServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -213,8 +257,8 @@ pub mod fullnode_data_server { impl tonic::codegen::Service> for FullnodeDataServer where T: FullnodeData, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -226,8 +270,52 @@ pub mod fullnode_data_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { + "/aptos.internal.fullnode.v1.FullnodeData/Ping" => { + #[allow(non_camel_case_types)] + struct PingSvc(pub Arc); + impl< + T: FullnodeData, + > tonic::server::UnaryService + for PingSvc { + type Response = super::PingFullnodeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::ping(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = PingSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/aptos.internal.fullnode.v1.FullnodeData/GetTransactionsFromNode" => { #[allow(non_camel_case_types)] struct GetTransactionsFromNodeSvc(pub Arc); @@ -250,7 +338,11 @@ pub mod fullnode_data_server { ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - (*inner).get_transactions_from_node(request).await + ::get_transactions_from_node( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -261,7 +353,6 @@ pub mod fullnode_data_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTransactionsFromNodeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -280,20 +371,25 @@ pub mod fullnode_data_server { } _ => { Box::pin(async move { - Ok( 
- http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for FullnodeDataServer { + impl Clone for FullnodeDataServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -305,17 +401,9 @@ pub mod fullnode_data_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for FullnodeDataServer { - const NAME: &'static str = "aptos.internal.fullnode.v1.FullnodeData"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.internal.fullnode.v1.FullnodeData"; + impl tonic::server::NamedService for FullnodeDataServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/protos/rust/src/pb/aptos.remote_executor.v1.rs b/protos/rust/src/pb/aptos.remote_executor.v1.rs index dcf6074bfbf9e..29daad3efd968 100644 --- a/protos/rust/src/pb/aptos.remote_executor.v1.rs +++ b/protos/rust/src/pb/aptos.remote_executor.v1.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // @generated -#[allow(clippy::derive_partial_eq_without_eq)] +// This file is @generated by prost-build. #[derive(Clone, PartialEq, ::prost::Message)] pub struct NetworkMessage { #[prost(bytes="vec", tag="1")] @@ -10,8 +10,7 @@ pub struct NetworkMessage { #[prost(string, tag="2")] pub message_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Empty { } /// Encoded file descriptor set for the `aptos.remote_executor.v1` package diff --git a/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs b/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs index 84cfa7776d314..85f08bf9e8caa 100644 --- a/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs +++ b/protos/rust/src/pb/aptos.remote_executor.v1.tonic.rs @@ -4,7 +4,13 @@ // @generated /// Generated client implementations. 
pub mod network_message_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// @@ -27,8 +33,8 @@ pub mod network_message_service_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -53,7 +59,7 @@ pub mod network_message_service_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { NetworkMessageServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -97,8 +103,7 @@ pub mod network_message_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -120,11 +125,17 @@ pub mod network_message_service_client { } /// Generated server implementations. pub mod network_message_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with NetworkMessageServiceServer. #[async_trait] - pub trait NetworkMessageService: Send + Sync + 'static { + pub trait NetworkMessageService: std::marker::Send + std::marker::Sync + 'static { /// async fn simple_msg_exchange( &self, @@ -133,20 +144,18 @@ pub mod network_message_service_server { } /// #[derive(Debug)] - pub struct NetworkMessageServiceServer { - inner: _Inner, + pub struct NetworkMessageServiceServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl NetworkMessageServiceServer { + impl NetworkMessageServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -197,8 +206,8 @@ pub mod network_message_service_server { for NetworkMessageServiceServer where T: NetworkMessageService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -210,7 +219,6 @@ pub mod network_message_service_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/aptos.remote_executor.v1.NetworkMessageService/SimpleMsgExchange" => { #[allow(non_camel_case_types)] @@ -230,7 +238,11 @@ pub mod network_message_service_server { ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - (*inner).simple_msg_exchange(request).await + ::simple_msg_exchange( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -241,7 +253,6 @@ pub mod network_message_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async 
move { - let inner = inner.0; let method = SimpleMsgExchangeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -260,20 +271,25 @@ pub mod network_message_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for NetworkMessageServiceServer { + impl Clone for NetworkMessageServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -285,18 +301,9 @@ pub mod network_message_service_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService - for NetworkMessageServiceServer { - const NAME: &'static str = "aptos.remote_executor.v1.NetworkMessageService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "aptos.remote_executor.v1.NetworkMessageService"; + impl tonic::server::NamedService for NetworkMessageServiceServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/protos/rust/src/pb/aptos.transaction.v1.rs b/protos/rust/src/pb/aptos.transaction.v1.rs index 1cd72373d592d..8e9ffcceb7d51 100644 --- a/protos/rust/src/pb/aptos.transaction.v1.rs +++ b/protos/rust/src/pb/aptos.transaction.v1.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // @generated +// This file is @generated by prost-build. /// A block on Aptos holds transactions in chronological order (ordered by a transactions monotonically increasing `version` field) /// All blocks start with a `BlockMetadataTransaction`, and are followed by zero or more transactions. /// The next `BlockMetadataTransaction` denotes the end of the current block, and the start of the next one. @@ -11,7 +12,6 @@ /// the same `height`. /// /// The Genesis Transaction (version 0) is contained within the first block, which has a height of `0` -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Block { /// Timestamp represents the timestamp of the `BlockMetadataTransaction` (or `GenesisTransaction` for the genesis block) @@ -34,7 +34,6 @@ pub struct Block { /// - Block Metadata Transaction: transactions generated by the chain to group together transactions forming a "block" /// - Block Epilogue / State Checkpoint Transaction: transactions generated by the chain to end the group transactions forming a bloc /// - Genesis Transaction: the first transaction of the chain, with all core contract and validator information baked in -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Transaction { #[prost(message, optional, tag="1")] @@ -75,13 +74,13 @@ pub mod transaction { /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str { match self { - TransactionType::Unspecified => "TRANSACTION_TYPE_UNSPECIFIED", - TransactionType::Genesis => "TRANSACTION_TYPE_GENESIS", - TransactionType::BlockMetadata => "TRANSACTION_TYPE_BLOCK_METADATA", - TransactionType::StateCheckpoint => "TRANSACTION_TYPE_STATE_CHECKPOINT", - TransactionType::User => "TRANSACTION_TYPE_USER", - TransactionType::Validator => "TRANSACTION_TYPE_VALIDATOR", - TransactionType::BlockEpilogue => "TRANSACTION_TYPE_BLOCK_EPILOGUE", + Self::Unspecified => "TRANSACTION_TYPE_UNSPECIFIED", + Self::Genesis => "TRANSACTION_TYPE_GENESIS", + Self::BlockMetadata => "TRANSACTION_TYPE_BLOCK_METADATA", + Self::StateCheckpoint => "TRANSACTION_TYPE_STATE_CHECKPOINT", + Self::User => "TRANSACTION_TYPE_USER", + Self::Validator => "TRANSACTION_TYPE_VALIDATOR", + Self::BlockEpilogue => "TRANSACTION_TYPE_BLOCK_EPILOGUE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -98,8 +97,7 @@ pub mod transaction { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum TxnData { #[prost(message, tag="7")] BlockMetadata(super::BlockMetadataTransaction), @@ -118,7 +116,6 @@ pub mod transaction { } } /// Transaction types. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockMetadataTransaction { #[prost(string, tag="1")] @@ -134,7 +131,6 @@ pub struct BlockMetadataTransaction { #[prost(uint32, repeated, tag="6")] pub failed_proposer_indices: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GenesisTransaction { #[prost(message, optional, tag="1")] @@ -142,11 +138,9 @@ pub struct GenesisTransaction { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct StateCheckpointTransaction { } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorTransaction { #[prost(message, repeated, tag="3")] @@ -156,16 +150,14 @@ pub struct ValidatorTransaction { } /// Nested message and enum types in `ValidatorTransaction`. pub mod validator_transaction { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ObservedJwkUpdate { #[prost(message, optional, tag="1")] pub quorum_certified_update: ::core::option::Option, } /// Nested message and enum types in `ObservedJwkUpdate`. pub mod observed_jwk_update { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportedProviderJwKs { #[prost(string, tag="1")] pub issuer: ::prost::alloc::string::String, @@ -176,16 +168,14 @@ pub mod validator_transaction { } /// Nested message and enum types in `ExportedProviderJWKs`. pub mod exported_provider_jw_ks { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct Jwk { #[prost(oneof="jwk::JwkType", tags="1, 2")] pub jwk_type: ::core::option::Option, } /// Nested message and enum types in `JWK`. 
pub mod jwk { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct Rsa { #[prost(string, tag="1")] pub kid: ::prost::alloc::string::String, @@ -198,16 +188,14 @@ pub mod validator_transaction { #[prost(string, tag="5")] pub n: ::prost::alloc::string::String, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct UnsupportedJwk { #[prost(bytes="vec", tag="1")] pub id: ::prost::alloc::vec::Vec, #[prost(bytes="vec", tag="2")] pub payload: ::prost::alloc::vec::Vec, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum JwkType { #[prost(message, tag="1")] UnsupportedJwk(UnsupportedJwk), @@ -216,8 +204,7 @@ pub mod validator_transaction { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportedAggregateSignature { #[prost(uint64, repeated, tag="1")] pub signer_indices: ::prost::alloc::vec::Vec, @@ -225,8 +212,7 @@ pub mod validator_transaction { #[prost(bytes="vec", tag="2")] pub sig: ::prost::alloc::vec::Vec, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct QuorumCertifiedUpdate { #[prost(message, optional, tag="1")] pub update: ::core::option::Option, @@ -234,16 +220,14 @@ pub mod validator_transaction { pub multi_sig: ::core::option::Option, } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct DkgUpdate { #[prost(message, optional, tag="1")] pub dkg_transcript: ::core::option::Option, } /// Nested message and enum types in `DkgUpdate`. 
pub mod dkg_update { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct DkgTranscript { #[prost(uint64, tag="1")] pub epoch: u64, @@ -253,8 +237,7 @@ pub mod validator_transaction { pub payload: ::prost::alloc::vec::Vec, } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum ValidatorTransactionType { #[prost(message, tag="1")] ObservedJwkUpdate(ObservedJwkUpdate), @@ -262,14 +245,12 @@ pub mod validator_transaction { DkgUpdate(DkgUpdate), } } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct BlockEpilogueTransaction { #[prost(message, optional, tag="1")] pub block_end_info: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct BlockEndInfo { #[prost(bool, tag="1")] pub block_gas_limit_reached: bool, @@ -280,7 +261,6 @@ pub struct BlockEndInfo { #[prost(uint64, tag="4")] pub block_approx_output_size: u64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserTransaction { #[prost(message, optional, tag="1")] @@ -288,7 +268,6 @@ pub struct UserTransaction { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Event { #[prost(message, optional, tag="1")] @@ -302,7 +281,6 @@ pub struct Event { #[prost(string, tag="4")] pub data: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionInfo { #[prost(bytes="vec", tag="1")] @@ -324,7 +302,6 @@ pub struct TransactionInfo { #[prost(message, repeated, tag="9")] pub changes: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EventKey { #[prost(uint64, tag="1")] @@ -332,7 +309,6 @@ pub struct EventKey { #[prost(string, tag="2")] pub account_address: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserTransactionRequest { #[prost(string, tag="1")] @@ -350,7 +326,6 @@ pub struct UserTransactionRequest { #[prost(message, optional, tag="7")] pub signature: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteSet { #[prost(enumeration="write_set::WriteSetType", tag="1")] @@ -374,9 +349,9 @@ pub mod write_set { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - WriteSetType::Unspecified => "WRITE_SET_TYPE_UNSPECIFIED", - WriteSetType::ScriptWriteSet => "WRITE_SET_TYPE_SCRIPT_WRITE_SET", - WriteSetType::DirectWriteSet => "WRITE_SET_TYPE_DIRECT_WRITE_SET", + Self::Unspecified => "WRITE_SET_TYPE_UNSPECIFIED", + Self::ScriptWriteSet => "WRITE_SET_TYPE_SCRIPT_WRITE_SET", + Self::DirectWriteSet => "WRITE_SET_TYPE_DIRECT_WRITE_SET", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -389,8 +364,7 @@ pub mod write_set { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum WriteSet { #[prost(message, tag="2")] ScriptWriteSet(super::ScriptWriteSet), @@ -398,7 +372,6 @@ pub mod write_set { DirectWriteSet(super::DirectWriteSet), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScriptWriteSet { #[prost(string, tag="1")] @@ -406,7 +379,6 @@ pub struct ScriptWriteSet { #[prost(message, optional, tag="2")] pub script: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DirectWriteSet { #[prost(message, repeated, tag="1")] @@ -414,7 +386,6 @@ pub struct DirectWriteSet { #[prost(message, repeated, tag="2")] pub events: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteSetChange { #[prost(enumeration="write_set_change::Type", tag="1")] @@ -442,13 +413,13 @@ pub mod write_set_change { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::DeleteModule => "TYPE_DELETE_MODULE", - Type::DeleteResource => "TYPE_DELETE_RESOURCE", - Type::DeleteTableItem => "TYPE_DELETE_TABLE_ITEM", - Type::WriteModule => "TYPE_WRITE_MODULE", - Type::WriteResource => "TYPE_WRITE_RESOURCE", - Type::WriteTableItem => "TYPE_WRITE_TABLE_ITEM", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::DeleteModule => "TYPE_DELETE_MODULE", + Self::DeleteResource => "TYPE_DELETE_RESOURCE", + Self::DeleteTableItem => "TYPE_DELETE_TABLE_ITEM", + Self::WriteModule => "TYPE_WRITE_MODULE", + Self::WriteResource => "TYPE_WRITE_RESOURCE", + Self::WriteTableItem => "TYPE_WRITE_TABLE_ITEM", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -465,8 +436,7 @@ pub mod write_set_change { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Change { #[prost(message, tag="2")] DeleteModule(super::DeleteModule), @@ -482,7 +452,6 @@ pub mod write_set_change { WriteTableItem(super::WriteTableItem), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteModule { #[prost(string, tag="1")] @@ -492,7 +461,6 @@ pub struct DeleteModule { #[prost(message, optional, tag="3")] pub module: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteResource { #[prost(string, tag="1")] @@ -504,7 +472,6 @@ pub struct DeleteResource { #[prost(string, tag="4")] pub type_str: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteTableItem { #[prost(bytes="vec", tag="1")] @@ -516,7 +483,6 @@ pub struct DeleteTableItem { #[prost(message, optional, tag="4")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteTableData { #[prost(string, tag="1")] @@ -524,7 +490,6 @@ pub struct DeleteTableData { #[prost(string, tag="2")] pub key_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteModule { #[prost(string, tag="1")] @@ -534,7 +499,6 @@ pub struct WriteModule { #[prost(message, optional, tag="3")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteResource { #[prost(string, tag="1")] @@ -548,7 +512,6 @@ pub struct WriteResource { #[prost(string, tag="5")] pub data: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteTableData { #[prost(string, tag="1")] @@ -560,7 +523,6 @@ pub struct WriteTableData { #[prost(string, tag="4")] pub value_type: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteTableItem { #[prost(bytes="vec", tag="1")] @@ -572,7 +534,6 @@ pub struct WriteTableItem { #[prost(message, optional, tag="4")] pub data: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionPayload { #[prost(enumeration="transaction_payload::Type", tag="1")] @@ -598,11 +559,11 @@ pub mod transaction_payload { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", - Type::ScriptPayload => "TYPE_SCRIPT_PAYLOAD", - Type::WriteSetPayload => "TYPE_WRITE_SET_PAYLOAD", - Type::MultisigPayload => "TYPE_MULTISIG_PAYLOAD", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", + Self::ScriptPayload => "TYPE_SCRIPT_PAYLOAD", + Self::WriteSetPayload => "TYPE_WRITE_SET_PAYLOAD", + Self::MultisigPayload => "TYPE_MULTISIG_PAYLOAD", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -617,8 +578,7 @@ pub mod transaction_payload { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Payload { #[prost(message, tag="2")] EntryFunctionPayload(super::EntryFunctionPayload), @@ -630,7 +590,6 @@ pub mod transaction_payload { MultisigPayload(super::MultisigPayload), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EntryFunctionPayload { #[prost(message, optional, tag="1")] @@ -642,7 +601,6 @@ pub struct EntryFunctionPayload { #[prost(string, tag="4")] pub entry_function_id_str: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveScriptBytecode { #[prost(bytes="vec", tag="1")] @@ -650,7 +608,6 @@ pub struct MoveScriptBytecode { #[prost(message, optional, tag="2")] pub abi: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScriptPayload { #[prost(message, optional, tag="1")] @@ -660,7 +617,6 @@ pub struct ScriptPayload { #[prost(string, repeated, tag="3")] pub arguments: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultisigPayload { #[prost(string, tag="1")] @@ -668,7 +624,6 @@ pub struct MultisigPayload { #[prost(message, optional, tag="2")] pub transaction_payload: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MultisigTransactionPayload { #[prost(enumeration="multisig_transaction_payload::Type", tag="1")] @@ -691,8 +646,8 @@ pub mod multisig_transaction_payload { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::EntryFunctionPayload => "TYPE_ENTRY_FUNCTION_PAYLOAD", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -704,14 +659,12 @@ pub mod multisig_transaction_payload { } } } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Payload { #[prost(message, tag="2")] EntryFunctionPayload(super::EntryFunctionPayload), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveModuleBytecode { #[prost(bytes="vec", tag="1")] @@ -719,7 +672,6 @@ pub struct MoveModuleBytecode { #[prost(message, optional, tag="2")] pub abi: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveModule { #[prost(string, tag="1")] @@ -733,7 +685,6 @@ pub struct MoveModule { #[prost(message, repeated, tag="5")] pub structs: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveFunction { #[prost(string, tag="1")] @@ -766,10 +717,10 @@ pub mod move_function { /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str { match self { - Visibility::Unspecified => "VISIBILITY_UNSPECIFIED", - Visibility::Private => "VISIBILITY_PRIVATE", - Visibility::Public => "VISIBILITY_PUBLIC", - Visibility::Friend => "VISIBILITY_FRIEND", + Self::Unspecified => "VISIBILITY_UNSPECIFIED", + Self::Private => "VISIBILITY_PRIVATE", + Self::Public => "VISIBILITY_PUBLIC", + Self::Friend => "VISIBILITY_FRIEND", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -784,7 +735,6 @@ pub mod move_function { } } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStruct { #[prost(string, tag="1")] @@ -800,7 +750,6 @@ pub struct MoveStruct { #[prost(message, repeated, tag="5")] pub fields: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStructGenericTypeParam { #[prost(enumeration="MoveAbility", repeated, tag="1")] @@ -808,7 +757,6 @@ pub struct MoveStructGenericTypeParam { #[prost(bool, tag="2")] pub is_phantom: bool, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStructField { #[prost(string, tag="1")] @@ -816,13 +764,11 @@ pub struct MoveStructField { #[prost(message, optional, tag="2")] pub r#type: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveFunctionGenericTypeParam { #[prost(enumeration="MoveAbility", repeated, tag="1")] pub constraints: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveType { #[prost(enumeration="MoveTypes", tag="1")] @@ -832,16 +778,14 @@ pub struct MoveType { } /// Nested message and enum types in `MoveType`. 
pub mod move_type { - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReferenceType { #[prost(bool, tag="1")] pub mutable: bool, #[prost(message, optional, boxed, tag="2")] pub to: ::core::option::Option<::prost::alloc::boxed::Box<super::MoveType>>, } - #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Content { #[prost(message, tag="3")] Vector(::prost::alloc::boxed::Box<super::MoveType>), @@ -855,13 +799,11 @@ pub mod move_type { Unparsable(::prost::alloc::string::String), } } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteSetPayload { #[prost(message, optional, tag="1")] pub write_set: ::core::option::Option<WriteSet>, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EntryFunctionId { #[prost(message, optional, tag="1")] @@ -869,7 +811,6 @@ pub struct EntryFunctionId { #[prost(string, tag="2")] pub name: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveModuleId { #[prost(string, tag="1")] @@ -877,7 +818,6 @@ pub struct MoveModuleId { #[prost(string, tag="2")] pub name: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MoveStructTag { #[prost(string, tag="1")] @@ -889,7 +829,6 @@ pub struct MoveStructTag { #[prost(message, repeated, tag="4")] pub generic_type_params: ::prost::alloc::vec::Vec<MoveType>, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Signature { #[prost(enumeration="signature::Type", tag="1")] @@ -916,12 +855,12 @@ pub mod signature { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::Ed25519 => "TYPE_ED25519", - Type::MultiEd25519 => "TYPE_MULTI_ED25519", - Type::MultiAgent => "TYPE_MULTI_AGENT", - Type::FeePayer => "TYPE_FEE_PAYER", - Type::SingleSender => "TYPE_SINGLE_SENDER", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::Ed25519 => "TYPE_ED25519", + Self::MultiEd25519 => "TYPE_MULTI_ED25519", + Self::MultiAgent => "TYPE_MULTI_AGENT", + Self::FeePayer => "TYPE_FEE_PAYER", + Self::SingleSender => "TYPE_SINGLE_SENDER", } } /// Creates an enum from field names used in the ProtoBuf definition.
@@ -937,8 +876,7 @@ pub mod signature {
             }
         }
     }
-    #[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Oneof)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
     pub enum Signature {
         #[prost(message, tag="2")]
         Ed25519(super::Ed25519Signature),
@@ -953,7 +891,6 @@ pub mod signature {
         SingleSender(super::SingleSender),
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Ed25519Signature {
     #[prost(bytes="vec", tag="1")]
@@ -961,7 +898,6 @@ pub struct Ed25519Signature {
     #[prost(bytes="vec", tag="2")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MultiEd25519Signature {
     #[prost(bytes="vec", repeated, tag="1")]
@@ -973,7 +909,6 @@ pub struct MultiEd25519Signature {
     #[prost(uint32, repeated, tag="4")]
     pub public_key_indices: ::prost::alloc::vec::Vec<u32>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MultiAgentSignature {
     #[prost(message, optional, tag="1")]
@@ -983,7 +918,6 @@ pub struct MultiAgentSignature {
     #[prost(message, repeated, tag="3")]
     pub secondary_signers: ::prost::alloc::vec::Vec<AccountSignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct FeePayerSignature {
     #[prost(message, optional, tag="1")]
@@ -997,7 +931,6 @@ pub struct FeePayerSignature {
     #[prost(message, optional, tag="5")]
     pub fee_payer_signer: ::core::option::Option<AccountSignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct AnyPublicKey {
     #[prost(enumeration="any_public_key::Type", tag="1")]
@@ -1024,12 +957,12 @@ pub mod any_public_key {
         /// (if the ProtoBuf definition does not change) and safe for programmatic use.
         pub fn as_str_name(&self) -> &'static str {
             match self {
-                Type::Unspecified => "TYPE_UNSPECIFIED",
-                Type::Ed25519 => "TYPE_ED25519",
-                Type::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA",
-                Type::Secp256r1Ecdsa => "TYPE_SECP256R1_ECDSA",
-                Type::Keyless => "TYPE_KEYLESS",
-                Type::FederatedKeyless => "TYPE_FEDERATED_KEYLESS",
+                Self::Unspecified => "TYPE_UNSPECIFIED",
+                Self::Ed25519 => "TYPE_ED25519",
+                Self::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA",
+                Self::Secp256r1Ecdsa => "TYPE_SECP256R1_ECDSA",
+                Self::Keyless => "TYPE_KEYLESS",
+                Self::FederatedKeyless => "TYPE_FEDERATED_KEYLESS",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -1046,7 +979,6 @@ pub mod any_public_key {
         }
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct AnySignature {
     #[prost(enumeration="any_signature::Type", tag="1")]
@@ -1078,11 +1010,11 @@ pub mod any_signature {
         /// (if the ProtoBuf definition does not change) and safe for programmatic use.
         pub fn as_str_name(&self) -> &'static str {
             match self {
-                Type::Unspecified => "TYPE_UNSPECIFIED",
-                Type::Ed25519 => "TYPE_ED25519",
-                Type::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA",
-                Type::Webauthn => "TYPE_WEBAUTHN",
-                Type::Keyless => "TYPE_KEYLESS",
+                Self::Unspecified => "TYPE_UNSPECIFIED",
+                Self::Ed25519 => "TYPE_ED25519",
+                Self::Secp256k1Ecdsa => "TYPE_SECP256K1_ECDSA",
+                Self::Webauthn => "TYPE_WEBAUTHN",
+                Self::Keyless => "TYPE_KEYLESS",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -1098,8 +1030,7 @@ pub mod any_signature {
         }
     }
     /// Support: >= 1.10.
-    #[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Oneof)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
     pub enum SignatureVariant {
         #[prost(message, tag="3")]
         Ed25519(super::Ed25519),
@@ -1111,31 +1042,26 @@ pub mod any_signature {
         Keyless(super::Keyless),
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Ed25519 {
     #[prost(bytes="vec", tag="1")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Secp256k1Ecdsa {
     #[prost(bytes="vec", tag="1")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct WebAuthn {
     #[prost(bytes="vec", tag="1")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Keyless {
     #[prost(bytes="vec", tag="1")]
     pub signature: ::prost::alloc::vec::Vec<u8>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct SingleKeySignature {
     #[prost(message, optional, tag="1")]
@@ -1143,7 +1069,6 @@ pub struct SingleKeySignature {
     #[prost(message, optional, tag="2")]
     pub signature: ::core::option::Option<AnySignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct IndexedSignature {
     #[prost(uint32, tag="1")]
@@ -1151,7 +1076,6 @@ pub struct IndexedSignature {
     #[prost(message, optional, tag="2")]
     pub signature: ::core::option::Option<AnySignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MultiKeySignature {
     #[prost(message, repeated, tag="1")]
@@ -1161,13 +1085,11 @@ pub struct MultiKeySignature {
     #[prost(uint32, tag="3")]
     pub signatures_required: u32,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct SingleSender {
     #[prost(message, optional, tag="1")]
     pub sender: ::core::option::Option<AccountSignature>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct AccountSignature {
     #[prost(enumeration="account_signature::Type", tag="1")]
@@ -1193,11 +1115,11 @@ pub mod account_signature {
         /// (if the ProtoBuf definition does not change) and safe for programmatic use.
         pub fn as_str_name(&self) -> &'static str {
             match self {
-                Type::Unspecified => "TYPE_UNSPECIFIED",
-                Type::Ed25519 => "TYPE_ED25519",
-                Type::MultiEd25519 => "TYPE_MULTI_ED25519",
-                Type::SingleKey => "TYPE_SINGLE_KEY",
-                Type::MultiKey => "TYPE_MULTI_KEY",
+                Self::Unspecified => "TYPE_UNSPECIFIED",
+                Self::Ed25519 => "TYPE_ED25519",
+                Self::MultiEd25519 => "TYPE_MULTI_ED25519",
+                Self::SingleKey => "TYPE_SINGLE_KEY",
+                Self::MultiKey => "TYPE_MULTI_KEY",
             }
         }
         /// Creates an enum from field names used in the ProtoBuf definition.
@@ -1212,8 +1134,7 @@ pub mod account_signature {
             }
         }
     }
-    #[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Oneof)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
     pub enum Signature {
         #[prost(message, tag="2")]
         Ed25519(super::Ed25519Signature),
@@ -1226,7 +1147,6 @@ pub mod account_signature {
         MultiKeySignature(super::MultiKeySignature),
     }
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct TransactionSizeInfo {
     #[prost(uint32, tag="1")]
@@ -1236,16 +1156,14 @@ pub struct TransactionSizeInfo {
     #[prost(message, repeated, tag="3")]
     pub write_op_size_info: ::prost::alloc::vec::Vec<WriteOpSizeInfo>,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct EventSizeInfo {
     #[prost(uint32, tag="1")]
     pub type_tag_bytes: u32,
     #[prost(uint32, tag="2")]
     pub total_bytes: u32,
 }
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct WriteOpSizeInfo {
     #[prost(uint32, tag="1")]
     pub key_bytes: u32,
@@ -1283,21 +1201,21 @@ impl MoveTypes {
     /// (if the ProtoBuf definition does not change) and safe for programmatic use.
     pub fn as_str_name(&self) -> &'static str {
         match self {
-            MoveTypes::Unspecified => "MOVE_TYPES_UNSPECIFIED",
-            MoveTypes::Bool => "MOVE_TYPES_BOOL",
-            MoveTypes::U8 => "MOVE_TYPES_U8",
-            MoveTypes::U16 => "MOVE_TYPES_U16",
-            MoveTypes::U32 => "MOVE_TYPES_U32",
-            MoveTypes::U64 => "MOVE_TYPES_U64",
-            MoveTypes::U128 => "MOVE_TYPES_U128",
-            MoveTypes::U256 => "MOVE_TYPES_U256",
-            MoveTypes::Address => "MOVE_TYPES_ADDRESS",
-            MoveTypes::Signer => "MOVE_TYPES_SIGNER",
-            MoveTypes::Vector => "MOVE_TYPES_VECTOR",
-            MoveTypes::Struct => "MOVE_TYPES_STRUCT",
-            MoveTypes::GenericTypeParam => "MOVE_TYPES_GENERIC_TYPE_PARAM",
-            MoveTypes::Reference => "MOVE_TYPES_REFERENCE",
-            MoveTypes::Unparsable => "MOVE_TYPES_UNPARSABLE",
+            Self::Unspecified => "MOVE_TYPES_UNSPECIFIED",
+            Self::Bool => "MOVE_TYPES_BOOL",
+            Self::U8 => "MOVE_TYPES_U8",
+            Self::U16 => "MOVE_TYPES_U16",
+            Self::U32 => "MOVE_TYPES_U32",
+            Self::U64 => "MOVE_TYPES_U64",
+            Self::U128 => "MOVE_TYPES_U128",
+            Self::U256 => "MOVE_TYPES_U256",
+            Self::Address => "MOVE_TYPES_ADDRESS",
+            Self::Signer => "MOVE_TYPES_SIGNER",
+            Self::Vector => "MOVE_TYPES_VECTOR",
+            Self::Struct => "MOVE_TYPES_STRUCT",
+            Self::GenericTypeParam => "MOVE_TYPES_GENERIC_TYPE_PARAM",
+            Self::Reference => "MOVE_TYPES_REFERENCE",
+            Self::Unparsable => "MOVE_TYPES_UNPARSABLE",
         }
     }
     /// Creates an enum from field names used in the ProtoBuf definition.
@@ -1338,11 +1256,11 @@ impl MoveAbility {
     /// (if the ProtoBuf definition does not change) and safe for programmatic use.
     pub fn as_str_name(&self) -> &'static str {
         match self {
-            MoveAbility::Unspecified => "MOVE_ABILITY_UNSPECIFIED",
-            MoveAbility::Copy => "MOVE_ABILITY_COPY",
-            MoveAbility::Drop => "MOVE_ABILITY_DROP",
-            MoveAbility::Store => "MOVE_ABILITY_STORE",
-            MoveAbility::Key => "MOVE_ABILITY_KEY",
+            Self::Unspecified => "MOVE_ABILITY_UNSPECIFIED",
+            Self::Copy => "MOVE_ABILITY_COPY",
+            Self::Drop => "MOVE_ABILITY_DROP",
+            Self::Store => "MOVE_ABILITY_STORE",
+            Self::Key => "MOVE_ABILITY_KEY",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
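The regeneration above is mechanical: the clippy allow attribute goes away, match arms switch to `Self::`, and scalar-only messages such as `EventSizeInfo` and `WriteOpSizeInfo` additionally derive `Copy`. A minimal caller-side sketch of what the retained string helpers and the new `Copy` derive provide, assuming the types are re-exported under `aptos_protos::transaction::v1` (the re-export path, not the generated API, is the assumption here):

use aptos_protos::transaction::v1::{EventSizeInfo, MoveAbility};

fn main() {
    // The generated string helpers survive the regeneration unchanged.
    assert_eq!(MoveAbility::Copy.as_str_name(), "MOVE_ABILITY_COPY");
    assert_eq!(MoveAbility::from_str_name("MOVE_ABILITY_KEY"), Some(MoveAbility::Key));

    // EventSizeInfo now derives Copy: assignment duplicates the value,
    // so `info` remains usable afterwards without an explicit .clone().
    let info = EventSizeInfo { type_tag_bytes: 8, total_bytes: 64 };
    let copied = info;
    assert_eq!(info, copied);
}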
diff --git a/protos/rust/src/pb/aptos.util.timestamp.rs b/protos/rust/src/pb/aptos.util.timestamp.rs
index ec95fda8d3d21..f746dba3fd7b1 100644
--- a/protos/rust/src/pb/aptos.util.timestamp.rs
+++ b/protos/rust/src/pb/aptos.util.timestamp.rs
@@ -2,8 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0

 // @generated
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
+// This file is @generated by prost-build.
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
 pub struct Timestamp {
     /// Represents seconds of UTC time since Unix epoch
     /// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
diff --git a/protos/typescript/src/aptos/internal/fullnode/v1/fullnode_data.ts b/protos/typescript/src/aptos/internal/fullnode/v1/fullnode_data.ts
index be987c3af53ff..57aa4991c9389 100644
--- a/protos/typescript/src/aptos/internal/fullnode/v1/fullnode_data.ts
+++ b/protos/typescript/src/aptos/internal/fullnode/v1/fullnode_data.ts
@@ -7,9 +7,17 @@ import {
   makeGenericClientConstructor,
   Metadata,
 } from "@grpc/grpc-js";
-import type { CallOptions, ClientOptions, UntypedServiceImplementation } from "@grpc/grpc-js";
+import type {
+  CallOptions,
+  ClientOptions,
+  ClientUnaryCall,
+  handleUnaryCall,
+  ServiceError,
+  UntypedServiceImplementation,
+} from "@grpc/grpc-js";
 import Long from "long";
 import _m0 from "protobufjs/minimal";
+import { FullnodeInfo } from "../../../indexer/v1/grpc";
 import { Transaction } from "../../../transaction/v1/transaction";

 export interface TransactionsOutput {
@@ -93,6 +101,13 @@ export interface TransactionsFromNodeResponse {
   chainId?: number | undefined;
 }

+export interface PingFullnodeRequest {
+}
+
+export interface PingFullnodeResponse {
+  info?: FullnodeInfo | undefined;
+}
+
 function createBaseTransactionsOutput(): TransactionsOutput {
   return { transactions: [] };
 }
@@ -558,8 +573,187 @@ export const TransactionsFromNodeResponse = {
   },
 };

+function createBasePingFullnodeRequest(): PingFullnodeRequest {
+  return {};
+}
+
+export const PingFullnodeRequest = {
+  encode(_: PingFullnodeRequest, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
+    return writer;
+  },
+
+  decode(input: _m0.Reader | Uint8Array, length?: number): PingFullnodeRequest {
+    const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input);
+    let end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBasePingFullnodeRequest();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skipType(tag & 7);
+    }
+    return message;
+  },
+
+  // encodeTransform encodes a source of message objects.
+  // Transform<PingFullnodeRequest, Uint8Array>
+  async *encodeTransform(
+    source:
+      | AsyncIterable<PingFullnodeRequest | PingFullnodeRequest[]>
+      | Iterable<PingFullnodeRequest | PingFullnodeRequest[]>,
+  ): AsyncIterable<Uint8Array> {
+    for await (const pkt of source) {
+      if (globalThis.Array.isArray(pkt)) {
+        for (const p of (pkt as any)) {
+          yield* [PingFullnodeRequest.encode(p).finish()];
+        }
+      } else {
+        yield* [PingFullnodeRequest.encode(pkt as any).finish()];
+      }
+    }
+  },
+
+  // decodeTransform decodes a source of encoded messages.
+  // Transform<Uint8Array, PingFullnodeRequest>
+  async *decodeTransform(
+    source: AsyncIterable<Uint8Array | Uint8Array[]> | Iterable<Uint8Array | Uint8Array[]>,
+  ): AsyncIterable<PingFullnodeRequest> {
+    for await (const pkt of source) {
+      if (globalThis.Array.isArray(pkt)) {
+        for (const p of (pkt as any)) {
+          yield* [PingFullnodeRequest.decode(p)];
+        }
+      } else {
+        yield* [PingFullnodeRequest.decode(pkt as any)];
+      }
+    }
+  },
+
+  fromJSON(_: any): PingFullnodeRequest {
+    return {};
+  },
+
+  toJSON(_: PingFullnodeRequest): unknown {
+    const obj: any = {};
+    return obj;
+  },
+
+  create(base?: DeepPartial<PingFullnodeRequest>): PingFullnodeRequest {
+    return PingFullnodeRequest.fromPartial(base ?? {});
+  },
+  fromPartial(_: DeepPartial<PingFullnodeRequest>): PingFullnodeRequest {
+    const message = createBasePingFullnodeRequest();
+    return message;
+  },
+};
+
+function createBasePingFullnodeResponse(): PingFullnodeResponse {
+  return { info: undefined };
+}
+
+export const PingFullnodeResponse = {
+  encode(message: PingFullnodeResponse, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
+    if (message.info !== undefined) {
+      FullnodeInfo.encode(message.info, writer.uint32(10).fork()).ldelim();
+    }
+    return writer;
+  },
+
+  decode(input: _m0.Reader | Uint8Array, length?: number): PingFullnodeResponse {
+    const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input);
+    let end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBasePingFullnodeResponse();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1:
+          if (tag !== 10) {
+            break;
+          }
+
+          message.info = FullnodeInfo.decode(reader, reader.uint32());
+          continue;
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skipType(tag & 7);
+    }
+    return message;
+  },
+
+  // encodeTransform encodes a source of message objects.
+  // Transform<PingFullnodeResponse, Uint8Array>
+  async *encodeTransform(
+    source:
+      | AsyncIterable<PingFullnodeResponse | PingFullnodeResponse[]>
+      | Iterable<PingFullnodeResponse | PingFullnodeResponse[]>,
+  ): AsyncIterable<Uint8Array> {
+    for await (const pkt of source) {
+      if (globalThis.Array.isArray(pkt)) {
+        for (const p of (pkt as any)) {
+          yield* [PingFullnodeResponse.encode(p).finish()];
+        }
+      } else {
+        yield* [PingFullnodeResponse.encode(pkt as any).finish()];
+      }
+    }
+  },
+
+  // decodeTransform decodes a source of encoded messages.
+  // Transform<Uint8Array, PingFullnodeResponse>
+  async *decodeTransform(
+    source: AsyncIterable<Uint8Array | Uint8Array[]> | Iterable<Uint8Array | Uint8Array[]>,
+  ): AsyncIterable<PingFullnodeResponse> {
+    for await (const pkt of source) {
+      if (globalThis.Array.isArray(pkt)) {
+        for (const p of (pkt as any)) {
+          yield* [PingFullnodeResponse.decode(p)];
+        }
+      } else {
+        yield* [PingFullnodeResponse.decode(pkt as any)];
+      }
+    }
+  },
+
+  fromJSON(object: any): PingFullnodeResponse {
+    return { info: isSet(object.info) ? FullnodeInfo.fromJSON(object.info) : undefined };
+  },
+
+  toJSON(message: PingFullnodeResponse): unknown {
+    const obj: any = {};
+    if (message.info !== undefined) {
+      obj.info = FullnodeInfo.toJSON(message.info);
+    }
+    return obj;
+  },
+
+  create(base?: DeepPartial<PingFullnodeResponse>): PingFullnodeResponse {
+    return PingFullnodeResponse.fromPartial(base ?? {});
+  },
+  fromPartial(object: DeepPartial<PingFullnodeResponse>): PingFullnodeResponse {
+    const message = createBasePingFullnodeResponse();
+    message.info = (object.info !== undefined && object.info !== null)
+      ? FullnodeInfo.fromPartial(object.info)
+      : undefined;
+    return message;
+  },
+};
+
 export type FullnodeDataService = typeof FullnodeDataService;
 export const FullnodeDataService = {
+  ping: {
+    path: "/aptos.internal.fullnode.v1.FullnodeData/Ping",
+    requestStream: false,
+    responseStream: false,
+    requestSerialize: (value: PingFullnodeRequest) => Buffer.from(PingFullnodeRequest.encode(value).finish()),
+    requestDeserialize: (value: Buffer) => PingFullnodeRequest.decode(value),
+    responseSerialize: (value: PingFullnodeResponse) => Buffer.from(PingFullnodeResponse.encode(value).finish()),
+    responseDeserialize: (value: Buffer) => PingFullnodeResponse.decode(value),
+  },
   getTransactionsFromNode: {
     path: "/aptos.internal.fullnode.v1.FullnodeData/GetTransactionsFromNode",
     requestStream: false,
@@ -574,10 +768,26 @@ export const FullnodeDataService = {
 } as const;

 export interface FullnodeDataServer extends UntypedServiceImplementation {
+  ping: handleUnaryCall<PingFullnodeRequest, PingFullnodeResponse>;
   getTransactionsFromNode: handleServerStreamingCall<GetTransactionsFromNodeRequest, TransactionsFromNodeResponse>;
 }

 export interface FullnodeDataClient extends Client {
+  ping(
+    request: PingFullnodeRequest,
+    callback: (error: ServiceError | null, response: PingFullnodeResponse) => void,
+  ): ClientUnaryCall;
+  ping(
+    request: PingFullnodeRequest,
+    metadata: Metadata,
+    callback: (error: ServiceError | null, response: PingFullnodeResponse) => void,
+  ): ClientUnaryCall;
+  ping(
+    request: PingFullnodeRequest,
+    metadata: Metadata,
+    options: Partial<CallOptions>,
+    callback: (error: ServiceError | null, response: PingFullnodeResponse) => void,
+  ): ClientUnaryCall;
   getTransactionsFromNode(
     request: GetTransactionsFromNodeRequest,
     options?: Partial<CallOptions>,
diff --git a/protos/typescript/src/index.aptos.indexer.v1.ts b/protos/typescript/src/index.aptos.indexer.v1.ts
index b3dd8fc7ceb72..db6b45c6ddf74 100644
--- a/protos/typescript/src/index.aptos.indexer.v1.ts
+++ b/protos/typescript/src/index.aptos.indexer.v1.ts
@@ -1,3 +1,4 @@
 /* eslint-disable */
 export * from "./aptos/indexer/v1/raw_data";
+export * from "./aptos/indexer/v1/grpc";
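The patch wires the same unary Ping RPC through the proto definition, the tonic-generated Rust code, and the ts-proto bindings above. A minimal Rust caller sketch, assuming the tonic client is re-exported as aptos_protos::internal::fullnode::v1::fullnode_data_client::FullnodeDataClient and that the endpoint below is a placeholder for a real fullnode gRPC address:

use aptos_protos::internal::fullnode::v1::{
    fullnode_data_client::FullnodeDataClient, PingFullnodeRequest,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint; point this at an actual fullnode gRPC port.
    let mut client = FullnodeDataClient::connect("http://127.0.0.1:50051").await?;
    // Ping is unary: an empty request returns an optional FullnodeInfo.
    let response = client.ping(PingFullnodeRequest {}).await?;
    println!("fullnode info: {:?}", response.into_inner().info);
    Ok(())
}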