From 0ee0d8115c09b620f1db6e59067ec6c59c7aafc5 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 19 Dec 2024 11:00:57 -0300 Subject: [PATCH 001/189] Set peer capabilities when starting connection --- crates/networking/p2p/kademlia.rs | 15 +++++++++++++-- crates/networking/p2p/rlpx/connection.rs | 6 +++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index c87e477334..be312b76c0 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -1,6 +1,7 @@ use crate::{ discv4::{time_now_unix, FindNodeRequest}, peer_channels::PeerChannels, + rlpx::p2p::Capability, types::Node, }; use ethrex_core::{H256, H512, U256}; @@ -257,9 +258,16 @@ impl KademliaTable { None } + /// Sets the necessary data for the peer to be usable from the node's backend /// Set the sender end of the channel between the kademlia table and the peer's active connection + /// Set the peer's supported capabilities /// This function should be called each time a connection is established so the backend can send requests to the peers - pub fn set_channels(&mut self, node_id: H512, channels: PeerChannels) { + pub fn init_backend_communication( + &mut self, + node_id: H512, + channels: PeerChannels, + capabilities: Vec<Capability>, + ) { let bucket_idx = bucket_number(self.local_node_id, node_id); if let Some(peer) = self.buckets.get_mut(bucket_idx).and_then(|bucket| { bucket .peers .iter_mut() .find(|peer| peer.node.node_id == node_id) }) { - peer.channels = Some(channels) + peer.channels = Some(channels); + peer.supported_capabilities = capabilities; } } @@ -311,6 +320,7 @@ pub struct PeerData { pub last_ping_hash: Option<H256>, pub is_proven: bool, pub find_node_request: Option<FindNodeRequest>, + pub supported_capabilities: Vec<Capability>, /// a ratio to track the peer's ping responses pub liveness: u16, /// if a revalidation was sent to the peer, the bool marks if it has answered @@ -331,6 +341,7 @@ impl PeerData { find_node_request: None, revalidation: None, channels: None, + supported_capabilities: vec![], } } diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 0e97fcd265..4d6ec0f505 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -165,7 +165,11 @@ impl RLPxConnection { ) .await; }; - table.lock().await.set_channels(node_id, peer_channels); + let capabilities = self.capabilities.map(|(cap, _)| cap).collect(); + table + .lock() + .await + .init_backend_communication(node_id, peer_channels, capabilities); if let Err(e) = self.handle_peer_conn(sender, receiver).await { self.peer_conn_failed("Error during RLPx connection", e, table) .await;
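Patch 001 above threads the capabilities negotiated during the RLPx Hello exchange into the peer's Kademlia entry, next to its request channels, so the backend can later pick peers by protocol. The connection holds them as (capability, version) pairs and only the capability half is stored. A minimal standalone sketch of that projection, using a simplified stand-in for the enum in rlpx/p2p.rs (the version numbers here are illustrative assumptions):

// Simplified stand-in for rlpx::p2p::Capability; the real enum gains a
// Copy derive in the next patch so it can be copied out of the pair cheaply.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq)]
enum Capability {
    P2p,
    Eth,
    Snap,
}

fn main() {
    // (capability, version) pairs as negotiated in the Hello message.
    let negotiated: Vec<(Capability, u8)> = vec![(Capability::Eth, 68), (Capability::Snap, 1)];
    // Keep only the capability half for the peer's table entry.
    let supported: Vec<Capability> = negotiated.iter().map(|(cap, _)| *cap).collect();
    assert_eq!(supported, vec![Capability::Eth, Capability::Snap]);
}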
From 3e35ceccafd34d11951d6802e374be04069f3c9d Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 19 Dec 2024 11:33:35 -0300 Subject: [PATCH 002/189] get_peer: Make it random + filter by cap --- crates/networking/p2p/kademlia.rs | 49 ++++++++++++++++++------ crates/networking/p2p/rlpx/connection.rs | 2 +- crates/networking/p2p/rlpx/p2p.rs | 2 +- 3 files changed, 40 insertions(+), 13 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index be312b76c0..3ea2ef9b26 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -5,6 +5,7 @@ use crate::{ types::Node, }; use ethrex_core::{H256, H512, U256}; +use rand::random; use sha3::{Digest, Keccak256}; use tokio::sync::mpsc::UnboundedSender; use tracing::info; @@ -216,6 +217,31 @@ impl KademliaTable { peers } + /// Returns an iterator for all peers in the table + fn iter_peers(&self) -> impl Iterator<Item = &PeerData> { + self.buckets + .iter() + .map(|bucket| bucket.peers.iter()) + .flatten() + } + + /// Returns an iterator for all peers in the table that match the filter + fn filter_peers<'a>( + &'a self, + filter: &'a dyn Fn(&'a PeerData) -> bool, + ) -> impl Iterator<Item = &'a PeerData> + 'a { + self.iter_peers().filter(|peer| filter(peer)) + } + + /// Obtain a random peer from the kademlia table that matches the filter + fn get_random_peer_with_filter<'a>( + &'a self, + filter: &'a dyn Fn(&'a PeerData) -> bool, + ) -> Option<&'a PeerData> { + let peer_idx = random::<usize>() % self.filter_peers(filter).count(); + self.filter_peers(filter).nth(peer_idx) + } + /// Replaces the peer with the given id with the latest replacement stored. /// If there are no replacements, it simply removes it /// @@ -262,7 +288,7 @@ impl KademliaTable { /// Set the sender end of the channel between the kademlia table and the peer's active connection /// Set the peer's supported capabilities /// This function should be called each time a connection is established so the backend can send requests to the peers - pub fn init_backend_communication( + pub(crate) fn init_backend_communication( &mut self, node_id: H512, channels: PeerChannels, @@ -280,18 +306,19 @@ impl KademliaTable { } } - /// TODO: Randomly select peer - pub fn get_peer(&self) -> Option<PeerData> { - self.get_least_recently_pinged_peers(1).pop() - } - - /// Returns the channel ends to an active peer connection - /// The peer is selected randomly (TODO), and doesn't guarantee that the selected peer is not currently busy + /// Returns the channel ends to an active peer connection that supports the given capability + /// The peer is selected randomly, and doesn't guarantee that the selected peer is not currently busy /// If no peer is found, this method will try again after 10 seconds - /// TODO: Filter peers by capabilities, set max amount of retries - pub async fn get_peer_channels(&self) -> PeerChannels { + pub async fn get_peer_channels(&self, capability: Capability) -> PeerChannels { + let filter = |peer: &PeerData| -> bool { + // Search for peers with an active connection that support the required capabilities + peer.channels.is_some() && peer.supported_capabilities.contains(&capability) + }; loop { - if let Some(channels) = self.get_peer().and_then(|peer| peer.channels) { + if let Some(channels) = self + .get_random_peer_with_filter(&filter) + .and_then(|peer| peer.channels) + { return channels; } info!("[Sync] No peers available, retrying in 10 sec"); diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 4d6ec0f505..5a8566eae4 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -165,7 +165,7 @@ impl RLPxConnection { ) .await; }; - let capabilities = self.capabilities.map(|(cap, _)| cap).collect(); + let capabilities = self.capabilities.iter().map(|(cap, _)| *cap).collect(); table .lock() .await diff --git a/crates/networking/p2p/rlpx/p2p.rs b/crates/networking/p2p/rlpx/p2p.rs index fdc752a838..e913482482 100644 --- a/crates/networking/p2p/rlpx/p2p.rs +++ b/crates/networking/p2p/rlpx/p2p.rs @@ -15,7 +15,7 @@ use super::{ utils::{pubkey2id, snappy_compress}, }; -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Copy)] pub(crate) enum Capability { P2p, Eth,
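The selection in get_random_peer_with_filter is uniform over the filtered peers: count the matches, draw a random index modulo the count, then walk the filtered iterator again with nth. Note that the % operator panics when the count is zero, that is, when no connected peer supports the requested capability yet; patch 008 below replaces it with checked_rem. A self-contained sketch of the same two-pass scheme over a plain slice, assuming the rand 0.8 crate the table already imports:

use rand::random; // rand 0.8, already a dependency of the networking crate

/// Pick a uniformly random element among those matching `filter`.
/// Two passes over the data: one to count the matches, one to index into them.
fn random_match<'a, T>(items: &'a [T], filter: &dyn Fn(&T) -> bool) -> Option<&'a T> {
    let count = items.iter().filter(|item| filter(item)).count();
    // `random::<usize>() % count` would panic when count == 0;
    // checked_rem (adopted in patch 008 below) yields None instead.
    let idx = random::<usize>().checked_rem(count)?;
    items.iter().filter(|item| filter(item)).nth(idx)
}

fn main() {
    let data = [1, 2, 3, 4, 5, 6];
    assert!(random_match(&data, &|n: &i32| n % 2 == 0).is_some());
    assert!(random_match(&data, &|n: &i32| *n > 100).is_none()); // no match, no panic
}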
From b3dcdc37d2d08b159dca022a175382421dd9bcc7 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 19 Dec 2024 11:56:47 -0300 Subject: [PATCH 003/189] Fixes --- crates/networking/p2p/kademlia.rs | 2 +- crates/networking/p2p/rlpx/p2p.rs | 2 +- crates/networking/p2p/sync.rs | 13 +++++++------ 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 3ea2ef9b26..0a271eec5e 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -317,7 +317,7 @@ impl KademliaTable { loop { if let Some(channels) = self .get_random_peer_with_filter(&filter) - .and_then(|peer| peer.channels) + .and_then(|peer| peer.channels.clone()) { return channels; } diff --git a/crates/networking/p2p/rlpx/p2p.rs b/crates/networking/p2p/rlpx/p2p.rs index e913482482..f48e7c4d7b 100644 --- a/crates/networking/p2p/rlpx/p2p.rs +++ b/crates/networking/p2p/rlpx/p2p.rs @@ -16,7 +16,7 @@ use super::{ }; #[derive(Debug, Clone, PartialEq, Copy)] -pub(crate) enum Capability { +pub enum Capability { P2p, Eth, Snap, diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 8efc7cb4ee..8f329bb687 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -18,6 +18,7 @@ use tokio::{ use tracing::{debug, info, warn}; use crate::kademlia::KademliaTable; +use crate::rlpx::p2p::Capability; #[derive(Debug)] pub enum SyncMode { @@ -86,7 +87,7 @@ impl SyncManager { let mut all_block_headers = vec![]; let mut all_block_hashes = vec![]; loop { - let peer = self.peers.lock().await.get_peer_channels().await; + let peer = self.peers.lock().await.get_peer_channels(Capability::Eth).await; debug!("Requesting Block Headers from {current_head}"); // Request Block Headers from Peer if let Some(block_headers) = peer.request_block_headers(current_head).await { @@ -181,7 +182,7 @@ async fn download_and_run_blocks( store: Store, ) -> Result<(), ChainError> { loop { - let peer = peers.lock().await.get_peer_channels().await; + let peer = peers.lock().await.get_peer_channels(Capability::Eth).await; debug!("Requesting Block Bodies "); if let Some(block_bodies) = peer.request_block_bodies(block_hashes.clone()).await { let block_bodies_len = block_bodies.len(); @@ -218,7 +219,7 @@ async fn fetch_blocks_and_receipts( // Snap state fetching will take much longer than this so we don't need to parallelize fetching blocks and receipts // Fetch Block Bodies loop { - let peer = peers.lock().await.get_peer_channels().await; + let peer = peers.lock().await.get_peer_channels(Capability::Eth).await; debug!("Requesting Block Headers "); if let Some(block_bodies) = peer.request_block_bodies(block_hashes.clone()).await { debug!(" Received {} Block Bodies", block_bodies.len()); @@ -286,7 +287,7 @@ async fn rebuild_state_trie( let mut current_state_root = *EMPTY_TRIE_HASH; // Fetch Account Ranges loop { - let peer = peers.clone().lock().await.get_peer_channels().await; + let peer = peers.clone().lock().await.get_peer_channels(Capability::Snap).await; debug!("Requesting Account Range for state root {state_root}, starting hash: {start_account_hash}"); if let Some((account_hashes, accounts, should_continue)) = peer .request_account_range(state_root, start_account_hash) .await @@ -397,7 +398,7 @@ async fn fetch_bytecode_batch( store: Store, ) -> Result<Vec<H256>, StoreError> { loop { - let peer = peers.lock().await.get_peer_channels().await; + let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; if let Some(bytecodes) = peer.request_bytecodes(batch.clone()).await { debug!("Received {} bytecodes", 
bytecodes.len()); // Store the bytecodes @@ -462,7 +463,7 @@ async fn fetch_storage_batch( store: Store, ) -> Result<Vec<(H256, H256)>, StoreError> { loop { - let peer = peers.lock().await.get_peer_channels().await; + let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; let (batch_hahses, batch_roots) = batch.clone().into_iter().unzip(); if let Some((mut keys, mut values, incomplete)) = peer .request_storage_ranges(state_root, batch_roots, batch_hahses, H256::zero()) From 75e32b18a22bd5044db4deb7a95c77635cf5f07d Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 19 Dec 2024 12:09:28 -0300 Subject: [PATCH 004/189] Clippy --- crates/networking/p2p/kademlia.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 0a271eec5e..5fb8fc10ec 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -221,8 +221,7 @@ impl KademliaTable { fn iter_peers(&self) -> impl Iterator<Item = &PeerData> { self.buckets .iter() - .map(|bucket| bucket.peers.iter()) - .flatten() + .flat_map(|bucket| bucket.peers.iter()) } /// Returns an iterator for all peers in the table that match the filter From 30fec69cd067e56355eb2bfde83e4b8d58772886 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 19 Dec 2024 12:26:24 -0300 Subject: [PATCH 005/189] fmt --- crates/networking/p2p/kademlia.rs | 4 +--- crates/networking/p2p/sync.rs | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 5fb8fc10ec..38f804f1e9 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -219,9 +219,7 @@ impl KademliaTable { /// Returns an iterator for all peers in the table fn iter_peers(&self) -> impl Iterator<Item = &PeerData> { - self.buckets - .iter() - .flat_map(|bucket| bucket.peers.iter()) + self.buckets.iter().flat_map(|bucket| bucket.peers.iter()) } /// Returns an iterator for all peers in the table that match the filter diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 8f329bb687..01e85ab29a 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -87,7 +87,12 @@ impl SyncManager { let mut all_block_headers = vec![]; let mut all_block_hashes = vec![]; loop { - let peer = self.peers.lock().await.get_peer_channels(Capability::Eth).await; + let peer = self + .peers + .lock() + .await + .get_peer_channels(Capability::Eth) + .await; debug!("Requesting Block Headers from {current_head}"); // Request Block Headers from Peer if let Some(block_headers) = peer.request_block_headers(current_head).await { @@ -287,7 +292,12 @@ async fn rebuild_state_trie( let mut current_state_root = *EMPTY_TRIE_HASH; // Fetch Account Ranges loop { - let peer = peers.clone().lock().await.get_peer_channels(Capability::Snap).await; + let peer = peers + .clone() + .lock() + .await + .get_peer_channels(Capability::Snap) + .await; debug!("Requesting Account Range for state root {state_root}, starting hash: {start_account_hash}"); if let Some((account_hashes, accounts, should_continue)) = peer .request_account_range(state_root, start_account_hash)
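With patches 003 through 005 applied, every download site in the syncer names the protocol it needs: block header and body requests ask for an eth-capable peer, while snap-sync account range, bytecode, and storage requests ask for a snap-capable one, blocking with a 10-second retry until such a peer is connected. A reduced sketch of that wait-and-retry shape, assuming tokio as in the crate (the real get_peer_channels also locks the shared table on every attempt):

use std::time::Duration;

// Generic stand-in for KademliaTable::get_peer_channels: retry a fallible
// pick until it yields a value, sleeping 10 seconds between attempts.
async fn wait_for<T>(mut try_pick: impl FnMut() -> Option<T>) -> T {
    loop {
        if let Some(found) = try_pick() {
            return found;
        }
        // Mirrors the "[Sync] No peers available, retrying in 10 sec" branch.
        tokio::time::sleep(Duration::from_secs(10)).await;
    }
}

#[tokio::main]
async fn main() {
    // A pick that succeeds immediately, standing in for a connected snap peer.
    let channels = wait_for(|| Some("peer-channels")).await;
    assert_eq!(channels, "peer-channels");
}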
From 3ba67f4cce683b63ce231782ce134c2eb717f565 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 2 Jan 2025 17:42:59 -0300 Subject: [PATCH 006/189] Add holesky genesis + bootnodes --- Cargo.lock | 1 + cmd/ethrex/Cargo.toml | 1 + cmd/ethrex/ethrex.rs | 17 +++++++++++++---- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 30050ed127..4a2d5c4d45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2633,6 +2633,7 @@ dependencies = [ "ethrex-vm", "hex", "k256", + "lazy_static", "libmdbx", "local-ip-address", "rand 0.8.5", diff --git a/cmd/ethrex/Cargo.toml b/cmd/ethrex/Cargo.toml index 90030c4595..73c7548927 100644 --- a/cmd/ethrex/Cargo.toml +++ b/cmd/ethrex/Cargo.toml @@ -30,6 +30,7 @@ local-ip-address = "0.6" tokio-util.workspace = true libmdbx = { workspace = true, optional = true } redb = { workspace = true, optional = true } +lazy_static.workspace = true cfg-if = "1.0.0" diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 3a86c5ac75..3be06aae03 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -29,6 +29,7 @@ use tracing::{error, info, warn}; use tracing_subscriber::{filter::Directive, EnvFilter, FmtSubscriber}; mod cli; mod decode; +mod holesky_presets; const DEFAULT_DATADIR: &str = "ethrex"; #[tokio::main] @@ -90,16 +91,24 @@ async fn main() { .get_one::<String>("discovery.port") .expect("discovery.port is required"); - let genesis_file_path = matches + let mut network = matches .get_one::<String>("network") - .expect("network is required"); + .expect("network is required") + .clone(); - let bootnodes: Vec<BootNode> = matches + let mut bootnodes: Vec<BootNode> = matches .get_many("bootnodes") .map(Iterator::copied) .map(Iterator::collect) .unwrap_or_default(); + if network == "holesky" { + warn!("Using holesky presets, bootnodes field will be ignored"); + // Set holesky presets + network = String::from(holesky_presets::HOLESKY_GENESIS_PATH); + bootnodes = holesky_presets::HOLESKY_NODES.to_vec(); + } + if bootnodes.is_empty() { warn!("No bootnodes specified. This node will not be able to connect to the network."); } @@ -130,7 +139,7 @@ async fn main() { } } - let genesis = read_genesis_file(genesis_file_path); + let genesis = read_genesis_file(&network); store .add_initial_state(genesis.clone()) .expect("Failed to create genesis block"); From d9dd1ee0e00934430d584c0ffb20dd677846fef4 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 2 Jan 2025 18:32:38 -0300 Subject: [PATCH 007/189] Push files --- cmd/ethrex/genesis_holesky.json | 1015 +++++++++++++++++++++++++++ cmd/ethrex/holesky_presets.rs | 15 + 2 files changed, 1030 insertions(+) create mode 100644 cmd/ethrex/genesis_holesky.json create mode 100644 cmd/ethrex/holesky_presets.rs diff --git a/cmd/ethrex/genesis_holesky.json b/cmd/ethrex/genesis_holesky.json new file mode 100644 index 0000000000..5ff2be977c --- /dev/null +++ b/cmd/ethrex/genesis_holesky.json @@ -0,0 +1,1015 @@ +{ + "config": { + "chainId": 17000, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "mergeNetsplitBlock": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "shanghaiTime": 1696000704, + "cancunTime": 1707305664 + }, + "alloc": { + "0x0000000000000000000000000000000000000000": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000001": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000003": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000004": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000005": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000006": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000007": { + "balance": "1" 
+ }, + "0x0000000000000000000000000000000000000008": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000009": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000010": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000011": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000012": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000013": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000014": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000015": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000016": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000017": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000018": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000019": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000020": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000021": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000022": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000023": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000024": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000025": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000026": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000027": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000028": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000029": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000030": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000031": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000032": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000033": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000034": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000035": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000036": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000037": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000038": { + "balance": "1" + }, + 
"0x0000000000000000000000000000000000000039": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000040": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000041": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000042": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000043": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000044": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000045": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000046": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000047": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000048": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000049": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000050": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000051": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000052": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000053": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000054": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000055": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000056": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000057": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000058": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000059": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000060": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000061": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000062": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000063": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000064": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000065": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000066": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000067": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000068": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000069": { + "balance": "1" + }, + 
"0x000000000000000000000000000000000000006a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000070": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000071": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000072": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000073": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000074": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000075": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000076": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000077": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000078": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000079": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000080": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000081": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000082": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000083": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000084": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000085": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000086": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000087": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000088": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000089": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000090": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000091": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000092": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000093": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000094": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000095": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000096": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000097": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000098": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000099": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009a": { + "balance": "1" + }, + 
"0x000000000000000000000000000000000000009b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009f": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000aa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ab": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ac": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ad": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ae": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000af": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ba": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000be": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ca": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cb": { + "balance": "1" + }, + 
"0x00000000000000000000000000000000000000cc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ce": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000da": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000db": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000de": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000df": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ea": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000eb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ec": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ed": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ee": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ef": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fc": { + "balance": "1" + }, + 
"0x00000000000000000000000000000000000000fd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fe": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ff": { + "balance": "1" + }, + "0x4242424242424242424242424242424242424242": { + "balance": "0", + "code": "0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f168015610675578082
0380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b5
1815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51
815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", + "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", + "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", + "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", + "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", + "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", + "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", + 
"0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", + "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", + "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", + "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", + "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", + "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", + "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", + "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", + "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", + "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", + "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", + "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" + } + }, + "0x0000006916a87b82333f4245046623b23794C65C": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x0be949928Ff199c9EBA9E110db210AA5C94EFAd0": { + "balance": "0x7c13bc4b2c133c56000000" + }, + "0x0C100000006d7b5e23a1eAEE637f28cA32Cd5b31": { + "balance": "0x52b7d2dcc80cd2e4000000" + 
}, + "0x0C35317B7a96C454E2CB3d1A255D775Ab112cCc8": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x0d731cfabC5574329823F26d488416451d2ea376": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x0e79065B5F11b5BD1e62B935A600976ffF3754B9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x105083929bF9bb22C26cB1777Ec92661170D4285": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x10F5d45854e038071485AC9e402308cF80D2d2fE": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x1268AD189526AC0b386faF06eFfC46779c340eE6": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x12Cba59f5A74DB81a12ff63C349Bd82CBF6007C2": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1446D7f6dF00380F246d8211dE7f0FaBC4Fd248C": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x164e38a375247A784A81d420201AA8fe4E513921": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1B7aA44088a0eA95bdc65fef6E5071E946Bf7d8f": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x222222222222cF64a76AE3d36859958c864fDA2c": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x2f14582947E292a2eCd20C430B46f2d27CFE213c": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x2f2c75B5Dd5D246194812b00eEb3B09c2c66e2eE": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x341c40b94bf2afbfa42573cb78f16ee15a056238": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x34f845773D4364999f2fbC7AA26ABDeE902cBb46": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x3C75594181e03E8ECD8468A0037F058a9dAfad79": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x462396E69dBfa455F405f4DD82F3014Af8003B72": { + "balance": "0xa56fa5b99019a5c8000000" + }, + "0x49Df3CCa2670eB0D591146B16359fe336e476F29": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x4D0b04b405c6b62C7cFC3aE54759747e2C0b4662": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x4D496CcC28058B1D74B7a19541663E21154f9c84": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x509a7667aC8D0320e36172c192506a6188aA84f6": { + "balance": "0x7c13bc4b2c133c56000000" + }, + "0x5180db0237291A6449DdA9ed33aD90a38787621c": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x52730f347dEf6BA09adfF62EaC60D5fEe8205BC4": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x5EAC0fBd3dfef8aE3efa3c5dc1aa193bc6033dFd": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x6a7aA9b882d50Bb7bc5Da1a244719C99f12F06a3": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x6Cc9397c3B38739daCbfaA68EaD5F5D77Ba5F455": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x762cA62ca2549ad806763B3Aa1eA317c429bDBDa": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x778F5F13C4Be78A3a4d7141BCB26999702f407CF": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x875D25Ee4bC604C71BaF6236a8488F22399BED4b": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x8dF7878d3571BEF5e5a744F96287C8D20386d75A": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0x9E415A096fF77650dc925dEA546585B4adB322B6": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xA0766B65A4f7B1da79a1AF79aC695456eFa28644": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xA29B144A449E414A472c60C7AAf1aaFfE329021D": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xa55395566b0b54395B3246f96A0bDc4b8a483df9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xAC9ba72fb61aA7c31A95df0A8b6ebA6f41EF875e": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xB0498C15879db2eE5471d4926c5fAA25C9a09683": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xB19Fb4c1f280327e60Ed37b1Dc6EE77533539314": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xC21cB9C99C316d1863142F7dD86dd5496D81A8D6": { + "balance": 
"0xd3c21bcecceda1000000" + }, + "0xc473d412dc52e349862209924c8981b2ee420768": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xC48E23C5F6e1eA0BaEf6530734edC3968f79Af2e": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xc6e2459991BfE27cca6d86722F35da23A1E4Cb97": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xD3994e4d3202dD23c8497d7F75bF1647d1DA1bb1": { + "balance": "0x19D971E4FE8401E74000000" + }, + "0xDCA6e9B48Ea86AeBFDf9929949124042296b6e34": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xe0a2Bd4258D2768837BAa26A28fE71Dc079f84c7": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xEA28d002042fd9898D0Db016be9758eeAFE35C1E": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xEfA7454f1116807975A4750B46695E967850de5D": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xFBFd6Fa9F73Ac6A058E01259034C28001BEf8247": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xe0991E844041bE6F11B99da5b114b6bCf84EBd57": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x15E719b6AcAf1E4411Bf0f9576CB1D0dB161DdFc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x346D827a75F98F0A7a324Ff80b7C3F90252E8baC": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x73b2e0E54510239E22cC936F0b4a6dE1acf0AbdE": { + "balance": "0x52b7d2dcc80cd2e4000000" + }, + "0xBb977B2EE8a111D788B3477D242078d0B837E72b": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x834Dbf5A03e29c25bc55459cCe9c021EeBE676Ad": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xD1F77E4C1C45186e8653C489F90e008a73597296": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xb04aeF2a3d2D86B01006cCD4339A2e943d9c6480": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xC9CA2bA9A27De1Db589d8c33Ab8EDFa2111b31fb": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x4BC656B34De23896fa6069C9862F355b740401aF": { + "balance": "0x084595161401484a000000" + } + }, + "coinbase": "0x0000000000000000000000000000000000000000", + "difficulty": "0x01", + "extraData": "", + "gasLimit": "0x17D7840", + "nonce": "0x1234", + "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "1695902100" +} \ No newline at end of file diff --git a/cmd/ethrex/holesky_presets.rs b/cmd/ethrex/holesky_presets.rs new file mode 100644 index 0000000000..6be2f55794 --- /dev/null +++ b/cmd/ethrex/holesky_presets.rs @@ -0,0 +1,15 @@ +use std::str::FromStr; + +use ethrex_net::bootnode::BootNode; +use lazy_static::lazy_static; + +pub const HOLESKY_GENESIS_PATH: &str = "cmd/ethrex/genesis_holesky.json"; + +lazy_static! 
{ + pub static ref HOLESKY_NODES: Vec = vec![ + BootNode::from_str("enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303").unwrap(), + BootNode::from_str("enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303").unwrap(), + BootNode::from_str("enode://7fa09f1e8bb179ab5e73f45d3a7169a946e7b3de5ef5cea3a0d4546677e4435ee38baea4dd10b3ddfdc1f1c5e869052932af8b8aeb6f9738598ec4590d0b11a6@65.109.94.124:30303").unwrap(), + BootNode::from_str("enode://3524632a412f42dee4b9cc899b946912359bb20103d7596bddb9c8009e7683b7bff39ea20040b7ab64d23105d4eac932d86b930a605e632357504df800dba100@172.174.35.249:30303").unwrap(), + ]; +} From b0cd4eea6a97401d3e75dcf3773735f80dd79cdb Mon Sep 17 00:00:00 2001 From: fmoletta <99273364+fmoletta@users.noreply.github.com> Date: Fri, 3 Jan 2025 16:49:24 -0300 Subject: [PATCH 008/189] Fix rem by zero --- crates/networking/p2p/kademlia.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 38f804f1e9..e4122cbf20 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -235,7 +235,9 @@ impl KademliaTable { &'a self, filter: &'a dyn Fn(&'a PeerData) -> bool, ) -> Option<&'a PeerData> { - let peer_idx = random::() % self.filter_peers(filter).count(); + let peer_idx = rand::random::() + .checked_rem(self.filter_peers(filter).count()) + .unwrap_or_default(); self.filter_peers(filter).nth(peer_idx) } From dd3dd6ba51a271627810ee8a3f089e2ee5716920 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 10:29:35 -0300 Subject: [PATCH 009/189] Apply suggestion --- cmd/ethrex/holesky_presets.rs | 2 +- .../{genesis_holesky.json => networks/holesky/genesis.json} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename cmd/ethrex/{genesis_holesky.json => networks/holesky/genesis.json} (100%) diff --git a/cmd/ethrex/holesky_presets.rs b/cmd/ethrex/holesky_presets.rs index 6be2f55794..c589eba3d2 100644 --- a/cmd/ethrex/holesky_presets.rs +++ b/cmd/ethrex/holesky_presets.rs @@ -3,7 +3,7 @@ use std::str::FromStr; use ethrex_net::bootnode::BootNode; use lazy_static::lazy_static; -pub const HOLESKY_GENESIS_PATH: &str = "cmd/ethrex/genesis_holesky.json"; +pub const HOLESKY_GENESIS_PATH: &str = "cmd/ethrex/networks/holesky/genesis.json"; lazy_static! 
{ pub static ref HOLESKY_NODES: Vec = vec![ diff --git a/cmd/ethrex/genesis_holesky.json b/cmd/ethrex/networks/holesky/genesis.json similarity index 100% rename from cmd/ethrex/genesis_holesky.json rename to cmd/ethrex/networks/holesky/genesis.json From ab60c842d25b729f3553ad055449b75b4f2b46e2 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 11:00:29 -0300 Subject: [PATCH 010/189] Add show_peer_stats method --- crates/networking/p2p/kademlia.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index ebf0b6636a..dc8d57717d 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -324,6 +324,22 @@ impl KademliaTable { tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; } } + + /// Outputs total amount of peers, active peers, and active peers supporting the Snap Capability + pub fn show_peer_stats(&self) { + let active_filter = |peer: &PeerData| -> bool { + peer.channels + .as_ref() + .is_some() + }; + let snap_active_filter = |peer: &PeerData| -> bool { + peer.channels.as_ref().is_some() && peer.supported_capabilities.contains(&Capability::Snap) + }; + let total_peers = self.iter_peers().count(); + let active_peers = self.filter_peers(&active_filter).count(); + let snap_active_peers = self.filter_peers(&snap_active_filter).count(); + info!("Snap Peers: {snap_active_peers} / Active Peers {active_peers} / Total Peers: {total_peers}") + } } /// Computes the distance between two nodes according to the discv4 protocol From 9f4ebc8b978bc96928baf110eb6a99c09205919e Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 11:57:04 -0300 Subject: [PATCH 011/189] fmt --- crates/networking/p2p/kademlia.rs | 11 ++++------- crates/networking/p2p/sync.rs | 7 ++++++- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index dc8d57717d..6a8b4182bd 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -325,15 +325,12 @@ impl KademliaTable { } } - /// Outputs total amount of peers, active peers, and active peers supporting the Snap Capability + /// Outputs total amount of peers, active peers, and active peers supporting the Snap Capability to the command line pub fn show_peer_stats(&self) { - let active_filter = |peer: &PeerData| -> bool { - peer.channels - .as_ref() - .is_some() - }; + let active_filter = |peer: &PeerData| -> bool { peer.channels.as_ref().is_some() }; let snap_active_filter = |peer: &PeerData| -> bool { - peer.channels.as_ref().is_some() && peer.supported_capabilities.contains(&Capability::Snap) + peer.channels.as_ref().is_some() + && peer.supported_capabilities.contains(&Capability::Snap) }; let total_peers = self.iter_peers().count(); let active_peers = self.filter_peers(&active_filter).count(); diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 97d63f96d6..e6f73e0319 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -318,7 +318,12 @@ async fn rebuild_state_trie( // Fetch Account Ranges // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available for _ in 0..MAX_RETRIES { - let peer = peers.clone().lock().await.get_peer_channels(Capability::Snap).await; + let peer = peers + .clone() + .lock() + .await + .get_peer_channels(Capability::Snap) + .await; debug!("Requesting Account Range for state root 
{state_root}, starting hash: {start_account_hash}"); if let Some((account_hashes, accounts, should_continue)) = peer .request_account_range(state_root, start_account_hash) From 18fcab52b78d969bd0048e29ef2c26db5c03b1bd Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 12:26:03 -0300 Subject: [PATCH 012/189] Move holesky bootnodes to a json file --- Cargo.lock | 1 + cmd/ethrex/ethrex.rs | 6 +++--- cmd/ethrex/holesky_presets.rs | 15 --------------- cmd/ethrex/networks/holesky/bootnodes.json | 6 ++++++ crates/networking/p2p/Cargo.toml | 1 + crates/networking/p2p/bootnode.rs | 10 ++++++++++ 6 files changed, 21 insertions(+), 18 deletions(-) delete mode 100644 cmd/ethrex/holesky_presets.rs create mode 100644 cmd/ethrex/networks/holesky/bootnodes.json diff --git a/Cargo.lock b/Cargo.lock index 4a2d5c4d45..5fef1d06af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2785,6 +2785,7 @@ dependencies = [ "k256", "lazy_static", "rand 0.8.5", + "serde", "serde_json", "sha3", "snap", diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 3be06aae03..b2e250dcd7 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -29,7 +29,7 @@ use tracing::{error, info, warn}; use tracing_subscriber::{filter::Directive, EnvFilter, FmtSubscriber}; mod cli; mod decode; -mod holesky_presets; +mod networks; const DEFAULT_DATADIR: &str = "ethrex"; #[tokio::main] @@ -105,8 +105,8 @@ async fn main() { if network == "holesky" { warn!("Using holesky presets, bootnodes field will be ignored"); // Set holesky presets - network = String::from(holesky_presets::HOLESKY_GENESIS_PATH); - bootnodes = holesky_presets::HOLESKY_NODES.to_vec(); + network = String::from(networks::HOLESKY_GENESIS_PATH); + bootnodes = networks::HOLESKY_BOOTNODES.to_vec(); } if bootnodes.is_empty() { diff --git a/cmd/ethrex/holesky_presets.rs b/cmd/ethrex/holesky_presets.rs deleted file mode 100644 index c589eba3d2..0000000000 --- a/cmd/ethrex/holesky_presets.rs +++ /dev/null @@ -1,15 +0,0 @@ -use std::str::FromStr; - -use ethrex_net::bootnode::BootNode; -use lazy_static::lazy_static; - -pub const HOLESKY_GENESIS_PATH: &str = "cmd/ethrex/networks/holesky/genesis.json"; - -lazy_static! 
{ - pub static ref HOLESKY_NODES: Vec = vec![ - BootNode::from_str("enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303").unwrap(), - BootNode::from_str("enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303").unwrap(), - BootNode::from_str("enode://7fa09f1e8bb179ab5e73f45d3a7169a946e7b3de5ef5cea3a0d4546677e4435ee38baea4dd10b3ddfdc1f1c5e869052932af8b8aeb6f9738598ec4590d0b11a6@65.109.94.124:30303").unwrap(), - BootNode::from_str("enode://3524632a412f42dee4b9cc899b946912359bb20103d7596bddb9c8009e7683b7bff39ea20040b7ab64d23105d4eac932d86b930a605e632357504df800dba100@172.174.35.249:30303").unwrap(), - ]; -} diff --git a/cmd/ethrex/networks/holesky/bootnodes.json b/cmd/ethrex/networks/holesky/bootnodes.json new file mode 100644 index 0000000000..470ef3138b --- /dev/null +++ b/cmd/ethrex/networks/holesky/bootnodes.json @@ -0,0 +1,6 @@ +[ + "enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303", + "enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", + "enode://7fa09f1e8bb179ab5e73f45d3a7169a946e7b3de5ef5cea3a0d4546677e4435ee38baea4dd10b3ddfdc1f1c5e869052932af8b8aeb6f9738598ec4590d0b11a6@65.109.94.124:30303", + "enode://3524632a412f42dee4b9cc899b946912359bb20103d7596bddb9c8009e7683b7bff39ea20040b7ab64d23105d4eac932d86b930a605e632357504df800dba100@172.174.35.249:30303" +] diff --git a/crates/networking/p2p/Cargo.toml b/crates/networking/p2p/Cargo.toml index 10c57bd41e..9631ad7b35 100644 --- a/crates/networking/p2p/Cargo.toml +++ b/crates/networking/p2p/Cargo.toml @@ -19,6 +19,7 @@ hex.workspace = true thiserror.workspace = true lazy_static.workspace = true snap.workspace = true +serde.workspace = true k256 = { version = "0.13.3", features = ["ecdh"] } sha3 = "0.10.8" diff --git a/crates/networking/p2p/bootnode.rs b/crates/networking/p2p/bootnode.rs index 638eb27683..c61ad73d4f 100644 --- a/crates/networking/p2p/bootnode.rs +++ b/crates/networking/p2p/bootnode.rs @@ -33,6 +33,16 @@ pub fn decode_hex(s: &str) -> Result, ParseIntError> { .collect() } +impl<'de> serde::de::Deserialize<'de> for BootNode { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + BootNode::from_str(<&str>::deserialize(deserializer)?) 
+ .map_err(|e| serde::de::Error::custom(e)) + } +} + #[test] fn parse_bootnode_from_string() { let input = "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303"; From 83c4b3bd4a80f4e6ee0a49b33fc0c45aace54577 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 12:27:21 -0300 Subject: [PATCH 013/189] Push file --- cmd/ethrex/networks.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 cmd/ethrex/networks.rs diff --git a/cmd/ethrex/networks.rs b/cmd/ethrex/networks.rs new file mode 100644 index 0000000000..2687a2d14e --- /dev/null +++ b/cmd/ethrex/networks.rs @@ -0,0 +1,12 @@ +use ethrex_net::bootnode::BootNode; +use lazy_static::lazy_static; + +pub const HOLESKY_GENESIS_PATH: &str = "cmd/ethrex/networks/holesky/genesis.json"; +pub const HOLESKY_BOOTNODES_PATH: &str = "cmd/ethrex/networks/holesky/bootnodes.json"; + +lazy_static! { + pub static ref HOLESKY_BOOTNODES: Vec = serde_json::from_reader( + std::fs::File::open(HOLESKY_BOOTNODES_PATH).expect("Failed to open holesky bootnodes file") + ) + .expect("Failed to parse holesky bootnodes file"); +} From a263ae6867fcfc2e664ba8da70fe6c4c1275c146 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 14:05:13 -0300 Subject: [PATCH 014/189] Clippy --- crates/networking/p2p/bootnode.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/networking/p2p/bootnode.rs b/crates/networking/p2p/bootnode.rs index c61ad73d4f..2e0c3c6745 100644 --- a/crates/networking/p2p/bootnode.rs +++ b/crates/networking/p2p/bootnode.rs @@ -38,8 +38,7 @@ impl<'de> serde::de::Deserialize<'de> for BootNode { where D: serde::Deserializer<'de>, { - BootNode::from_str(<&str>::deserialize(deserializer)?) 
- .map_err(|e| serde::de::Error::custom(e)) + BootNode::from_str(<&str>::deserialize(deserializer)?).map_err(serde::de::Error::custom) } } From b221394c46bc4daa0d1eafa87dcd74e15c723787 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 14:31:04 -0300 Subject: [PATCH 015/189] Aquire receiver lock before sending request --- crates/networking/p2p/peer_channels.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index fb0199e395..f352bd834f 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -80,8 +80,8 @@ impl PeerChannels { skip: 0, reverse: matches!(order, BlockRequestOrder::NewToOld), }); - self.sender.send(request).await.ok()?; let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; let block_headers = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { loop { match receiver.recv().await { @@ -113,8 +113,8 @@ impl PeerChannels { id: request_id, block_hashes, }); - self.sender.send(request).await.ok()?; let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; let block_bodies = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { loop { match receiver.recv().await { @@ -147,8 +147,8 @@ impl PeerChannels { id: request_id, block_hashes, }); - self.sender.send(request).await.ok()?; let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; let receipts = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { loop { match receiver.recv().await { @@ -186,8 +186,8 @@ impl PeerChannels { limit_hash: HASH_MAX, response_bytes: MAX_RESPONSE_BYTES, }); - self.sender.send(request).await.ok()?; let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; let (accounts, proof) = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { loop { match receiver.recv().await { @@ -238,8 +238,8 @@ impl PeerChannels { hashes, bytes: MAX_RESPONSE_BYTES, }); - self.sender.send(request).await.ok()?; let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; let codes = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { loop { match receiver.recv().await { @@ -281,8 +281,8 @@ impl PeerChannels { limit_hash: HASH_MAX, response_bytes: MAX_RESPONSE_BYTES, }); - self.sender.send(request).await.ok()?; let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; let (mut slots, proof) = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { loop { match receiver.recv().await { @@ -387,8 +387,8 @@ impl PeerChannels { .collect(), bytes: MAX_RESPONSE_BYTES, }); - self.sender.send(request).await.ok()?; let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; let nodes = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { loop { match receiver.recv().await { @@ -446,8 +446,8 @@ impl PeerChannels { .collect(), bytes: MAX_RESPONSE_BYTES, }); - self.sender.send(request).await.ok()?; let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; let nodes = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { loop { match receiver.recv().await { From ba76e4e5af964b1b51a8a1fe9f17c6af932a163b Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 15:02:05 -0300 Subject: [PATCH 016/189] Add bootnodes & genesis --- cmd/ethrex/networks/holesky/genesis.json | 2 +- cmd/ethrex/networks/sepolia/bootnodes.json | 7 ++ 
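The ordering swap in PATCH 015 above is subtle: every request helper now takes `self.receiver.lock().await` before `self.sender.send(...)`. Sending first leaves a window in which a concurrent request/reply exchange already holds the receiver lock and can drain (and discard) the incoming reply before the task that asked for it ever acquires the lock. A minimal sketch of the pattern, using hypothetical message types (the real `PeerChannels` fields differ):

    use std::sync::Arc;
    use tokio::sync::{mpsc, Mutex};

    /// Hypothetical request/reply helper mirroring the lock-before-send order.
    async fn request_reply(
        sender: &mpsc::Sender<String>,
        receiver: &Arc<Mutex<mpsc::Receiver<String>>>,
    ) -> Option<String> {
        // Hold the receiving end first: no concurrent exchange can consume our reply.
        let mut receiver = receiver.lock().await;
        sender.send("request".into()).await.ok()?;
        receiver.recv().await
    }

Holding the lock across the send also serializes request/reply pairs on a single peer connection, which appears to be the intended behavior here.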
cmd/ethrex/networks/sepolia/genesis.json | 76 ++++++++++++++++++++++ 3 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 cmd/ethrex/networks/sepolia/bootnodes.json create mode 100644 cmd/ethrex/networks/sepolia/genesis.json diff --git a/cmd/ethrex/networks/holesky/genesis.json b/cmd/ethrex/networks/holesky/genesis.json index 5ff2be977c..a5fbcde2c4 100644 --- a/cmd/ethrex/networks/holesky/genesis.json +++ b/cmd/ethrex/networks/holesky/genesis.json @@ -1012,4 +1012,4 @@ "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "timestamp": "1695902100" -} \ No newline at end of file +} diff --git a/cmd/ethrex/networks/sepolia/bootnodes.json b/cmd/ethrex/networks/sepolia/bootnodes.json new file mode 100644 index 0000000000..8aed65a6f4 --- /dev/null +++ b/cmd/ethrex/networks/sepolia/bootnodes.json @@ -0,0 +1,7 @@ +[ + "enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", + "enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", + "enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", + "enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", + "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303" +] diff --git a/cmd/ethrex/networks/sepolia/genesis.json b/cmd/ethrex/networks/sepolia/genesis.json new file mode 100644 index 0000000000..1111e80793 --- /dev/null +++ b/cmd/ethrex/networks/sepolia/genesis.json @@ -0,0 +1,76 @@ +{ + "config": { + "chainId": 11155111, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "mergeNetsplitBlock": 1735371, + "terminalTotalDifficulty": 17000000000000000, + "terminalTotalDifficultyPassed": true, + "shanghaiTime": 1677557088, + "cancunTime": 1706655072, + "depositContractAddress": "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D" + }, + "alloc": { + "0xa2A6d93439144FFE4D27c9E088dCD8b783946263": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xBc11295936Aa79d594139de1B2e12629414F3BDB": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x7cF5b79bfe291A67AB02b393E456cCc4c266F753": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xaaec86394441f915bce3e6ab399977e9906f3b69": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xF47CaE1CF79ca6758Bfc787dbD21E6bdBe7112B8": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xd7eDDB78ED295B3C9629240E8924fb8D8874ddD8": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x8b7F0977Bb4f0fBE7076FA22bC24acA043583F5e": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xe2e2659028143784d557bcec6ff3a0721048880a": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xd9a5179f091d85051d3c982785efd1455cec8699": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xbeef32ca5b9a198d27B4e02F4c70439fE60356Cf": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x0000006916a87b82333f4245046623b23794c65c": { + "balance": "0x84595161401484A000000" + }, + 
"0xb21c33de1fab3fa15499c62b59fe0cc3250020d1": { + "balance": "0x52B7D2DCC80CD2E4000000" + }, + "0x10F5d45854e038071485AC9e402308cF80D2d2fE": { + "balance": "0x52B7D2DCC80CD2E4000000" + }, + "0xd7d76c58b3a519e9fA6Cc4D22dC017259BC49F1E": { + "balance": "0x52B7D2DCC80CD2E4000000" + }, + "0x799D329e5f583419167cD722962485926E338F4a": { + "balance": "0xDE0B6B3A7640000" + } + }, + "coinbase": "0x0000000000000000000000000000000000000000", + "difficulty": "0x20000", + "extraData": "0x5365706f6c69612c20417468656e732c204174746963612c2047726565636521", + "gasLimit": "0x1c9c380", + "nonce": "0x000000000000000", + "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x6159af19" +} From c1edda81a49490b53587a9a45e83ae0ed337b3f7 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 16:38:18 -0300 Subject: [PATCH 017/189] Fix Deserialize for BootNode --- crates/networking/p2p/bootnode.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/bootnode.rs b/crates/networking/p2p/bootnode.rs index 2e0c3c6745..8c6706317d 100644 --- a/crates/networking/p2p/bootnode.rs +++ b/crates/networking/p2p/bootnode.rs @@ -38,7 +38,7 @@ impl<'de> serde::de::Deserialize<'de> for BootNode { where D: serde::Deserializer<'de>, { - BootNode::from_str(<&str>::deserialize(deserializer)?).map_err(serde::de::Error::custom) + BootNode::from_str(&::deserialize(deserializer)?).map_err(serde::de::Error::custom) } } From e8fa56401751f6582455d766ff82b353ebdd364c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 16:56:29 -0300 Subject: [PATCH 018/189] Mute handle_message errors + show peer stats upon peer request --- crates/networking/p2p/kademlia.rs | 1 + crates/networking/p2p/rlpx/connection.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 6a8b4182bd..cf7a4ee18a 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -308,6 +308,7 @@ impl KademliaTable { /// The peer is selected randomly, and doesn't guarantee that the selected peer is not currenlty busy /// If no peer is found, this method will try again after 10 seconds pub async fn get_peer_channels(&self, capability: Capability) -> PeerChannels { + self.show_peer_stats(); let filter = |peer: &PeerData| -> bool { // Search for peers with an active connection that support the required capabilities peer.channels.is_some() && peer.supported_capabilities.contains(&capability) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 900098c5a4..e7e2cc568e 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -282,7 +282,7 @@ impl RLPxConnection { tokio::select! { // TODO check if this is cancel safe, and fix it if not. 
message = self.receive() => { - self.handle_message(message?, sender.clone()).await?; + let _ = self.handle_message(message?, sender.clone()).await; } // This is not ideal, but using the receiver without // this function call, causes the loop to take ownwership From f6e2e2dce6bde0dc97289f35ae80aa567462312f Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 17:00:53 -0300 Subject: [PATCH 019/189] Disable ForkId check --- crates/networking/p2p/rlpx/eth/backend.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/networking/p2p/rlpx/eth/backend.rs b/crates/networking/p2p/rlpx/eth/backend.rs index a76d9f03a8..98bb5c8e5d 100644 --- a/crates/networking/p2p/rlpx/eth/backend.rs +++ b/crates/networking/p2p/rlpx/eth/backend.rs @@ -68,11 +68,11 @@ pub fn validate_status(msg_data: StatusMessage, storage: &Store) -> Result<(), R )); } // Check ForkID - if msg_data.fork_id != fork_id { - return Err(RLPxError::HandshakeError( - "Fork Id does not match".to_string(), - )); - } + // if msg_data.fork_id != fork_id { + // return Err(RLPxError::HandshakeError( + // "Fork Id does not match".to_string(), + // )); + // } Ok(()) } From 0ab0070135542410cf0a62b8701b7385ab24b336 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 17:05:00 -0300 Subject: [PATCH 020/189] Omit unknown caps --- crates/networking/p2p/rlpx/p2p.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/p2p.rs b/crates/networking/p2p/rlpx/p2p.rs index 358248ad54..5b957ca8fb 100644 --- a/crates/networking/p2p/rlpx/p2p.rs +++ b/crates/networking/p2p/rlpx/p2p.rs @@ -7,6 +7,7 @@ use ethrex_rlp::{ structs::{Decoder, Encoder}, }; use k256::PublicKey; +use tracing::info; use crate::rlpx::utils::{id2pubkey, snappy_decompress}; @@ -20,6 +21,7 @@ pub enum Capability { P2p, Eth, Snap, + Unknown, } impl RLPEncode for Capability { @@ -28,6 +30,7 @@ impl RLPEncode for Capability { Self::P2p => "p2p".encode(buf), Self::Eth => "eth".encode(buf), Self::Snap => "snap".encode(buf), + Self::Unknown => "unk".encode(buf), } } } @@ -39,7 +42,7 @@ impl RLPDecode for Capability { "p2p" => Ok((Capability::P2p, rest)), "eth" => Ok((Capability::Eth, rest)), "snap" => Ok((Capability::Snap, rest)), - _ => Err(RLPDecodeError::UnexpectedString), + a => {info!("Unrecognized capability {a}"); Ok((Capability::Unknown, rest))}, } } } From dd1b575fd8cf7ffc720758a37fad8c9b9c54a89e Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 17:05:58 -0300 Subject: [PATCH 021/189] Make sync louder --- crates/networking/p2p/sync.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 9c9eb85ac4..2cb15da8ba 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -107,13 +107,13 @@ impl SyncManager { .await .get_peer_channels(Capability::Eth) .await; - debug!("Requesting Block Headers from {current_head}"); + info!("Requesting Block Headers from {current_head}"); // Request Block Headers from Peer if let Some(mut block_headers) = peer .request_block_headers(current_head, BlockRequestOrder::OldToNew) .await { - debug!("Received {} block headers", block_headers.len()); + info!("Received {} block headers", block_headers.len()); let mut block_hashes = block_headers .iter() .map(|header| header.compute_block_hash()) @@ -219,10 +219,10 @@ async fn download_and_run_blocks( ) -> Result<(), SyncError> { loop { let peer = 
peers.lock().await.get_peer_channels(Capability::Eth).await; - debug!("Requesting Block Bodies "); + info!("Requesting Block Bodies "); if let Some(block_bodies) = peer.request_block_bodies(block_hashes.clone()).await { let block_bodies_len = block_bodies.len(); - debug!("Received {} Block Bodies", block_bodies_len); + info!("Received {} Block Bodies", block_bodies_len); // Execute and store blocks for (hash, body) in block_hashes .drain(..block_bodies_len) @@ -240,7 +240,7 @@ async fn download_and_run_blocks( store.set_canonical_block(number, hash)?; store.update_latest_block_number(number)?; } - debug!("Executed & stored {} blocks", block_bodies_len); + info!("Executed & stored {} blocks", block_bodies_len); // Check if we need to ask for another batch if block_hashes.is_empty() { break; @@ -258,9 +258,9 @@ async fn store_block_bodies( ) -> Result<(), SyncError> { loop { let peer = peers.lock().await.get_peer_channels(Capability::Eth).await; - debug!("Requesting Block Headers "); + info!("Requesting Block Headers "); if let Some(block_bodies) = peer.request_block_bodies(block_hashes.clone()).await { - debug!(" Received {} Block Bodies", block_bodies.len()); + info!(" Received {} Block Bodies", block_bodies.len()); // Track which bodies we have already fetched let current_block_hashes = block_hashes.drain(..block_bodies.len()); // Add bodies to storage @@ -285,9 +285,9 @@ async fn store_receipts( ) -> Result<(), SyncError> { loop { let peer = peers.lock().await.get_peer_channels(Capability::Eth).await; - debug!("Requesting Block Headers "); + info!("Requesting Block Headers "); if let Some(receipts) = peer.request_receipts(block_hashes.clone()).await { - debug!(" Received {} Receipts", receipts.len()); + info!(" Received {} Receipts", receipts.len()); // Track which blocks we have already fetched receipts for for (block_hash, receipts) in block_hashes.drain(0..receipts.len()).zip(receipts) { store.add_receipts(block_hash, receipts)?; @@ -335,7 +335,7 @@ async fn rebuild_state_trie( .await .get_peer_channels(Capability::Snap) .await; - debug!("Requesting Account Range for state root {state_root}, starting hash: {start_account_hash}"); + info!("Requesting Account Range for state root {state_root}, starting hash: {start_account_hash}"); if let Some((account_hashes, accounts, should_continue)) = peer .request_account_range(state_root, start_account_hash) .await @@ -390,7 +390,7 @@ async fn rebuild_state_trie( storage_sender.send(vec![]).await?; storage_fetcher_handle.await??; let sync_complete = if current_state_root == state_root { - debug!("Completed state sync for state root {state_root}"); + info!("Completed state sync for state root {state_root}"); true } else { // Perform state healing to fix any potential inconsistency in the rebuilt tries @@ -444,7 +444,7 @@ async fn fetch_bytecode_batch( loop { let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; if let Some(bytecodes) = peer.request_bytecodes(batch.clone()).await { - debug!("Received {} bytecodes", bytecodes.len()); + info!("Received {} bytecodes", bytecodes.len()); // Store the bytecodes for code in bytecodes.into_iter() { store.add_account_code(batch.remove(0), code)?; @@ -506,7 +506,7 @@ async fn fetch_storage_batch( .request_storage_ranges(state_root, batch_roots, batch_hahses, H256::zero()) .await { - debug!("Received {} storage ranges", keys.len()); + info!("Received {} storage ranges", keys.len()); let mut _last_range; // Hold on to the last batch (if incomplete) if incomplete { @@ -677,7 +677,7 @@ async 
fn heal_storage_batch( .request_storage_trienodes(state_root, batch.clone()) .await { - debug!("Received {} nodes", nodes.len()); + info!("Received {} nodes", nodes.len()); // Process the nodes for each account path for (acc_path, paths) in batch.iter_mut() { let mut trie = store.open_storage_trie(*acc_path, *EMPTY_TRIE_HASH); From 8433e9ab0fcaa00ff090ca59c24bb944154cc0e0 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 17:08:51 -0300 Subject: [PATCH 022/189] Mute payload tracing --- crates/networking/rpc/engine/payload.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/networking/rpc/engine/payload.rs b/crates/networking/rpc/engine/payload.rs index 08f80292f7..6e8887b7e3 100644 --- a/crates/networking/rpc/engine/payload.rs +++ b/crates/networking/rpc/engine/payload.rs @@ -220,7 +220,7 @@ fn get_block_from_payload( parent_beacon_block_root: Option, ) -> Result { let block_hash = payload.block_hash; - info!("Received new payload with block hash: {block_hash:#x}"); + debug!("Received new payload with block hash: {block_hash:#x}"); payload .clone() @@ -236,7 +236,7 @@ fn validate_block_hash(payload: &ExecutionPayload, block: &Block) -> Result<(), "Invalid block hash. Expected {actual_block_hash:#x}, got {block_hash:#x}" ))); } - info!("Block hash {block_hash} is valid"); + debug!("Block hash {block_hash} is valid"); Ok(()) } @@ -249,7 +249,7 @@ fn execute_payload(block: &Block, context: &RpcApiContext) -> Result Ok(PayloadStatus::syncing()), // Under the current implementation this is not possible: we always calculate the state @@ -281,7 +281,7 @@ fn execute_payload(block: &Block, context: &RpcApiContext) -> Result { - info!("Block with hash {block_hash} executed and added to storage succesfully"); + debug!("Block with hash {block_hash} executed and added to storage succesfully"); Ok(PayloadStatus::valid_with_hash(block_hash)) } } @@ -314,7 +314,7 @@ fn get_payload( payload_id: u64, context: &RpcApiContext, ) -> Result<(Block, U256, BlobsBundle, bool), RpcErr> { - info!("Requested payload with id: {:#018x}", payload_id); + debug!("Requested payload with id: {:#018x}", payload_id); let payload = context.storage.get_payload(payload_id)?; let Some((payload_block, block_value, blobs_bundle, completed)) = payload else { From dff2d56804fc30faef7d89e39bd24fad11eddc8e Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 17:09:29 -0300 Subject: [PATCH 023/189] Fix --- crates/networking/rpc/engine/payload.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/rpc/engine/payload.rs b/crates/networking/rpc/engine/payload.rs index 6e8887b7e3..92e0595810 100644 --- a/crates/networking/rpc/engine/payload.rs +++ b/crates/networking/rpc/engine/payload.rs @@ -4,7 +4,7 @@ use ethrex_blockchain::payload::build_payload; use ethrex_core::types::{BlobsBundle, Block, Fork}; use ethrex_core::{H256, U256}; use serde_json::Value; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use crate::types::payload::{ExecutionPayload, ExecutionPayloadResponse, PayloadStatus}; use crate::utils::RpcRequest; From a8e0e6c8a10bc788ad03f830e4a6f7e1d5980ceb Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 18:04:05 -0300 Subject: [PATCH 024/189] Debug --- crates/networking/p2p/rlpx/connection.rs | 48 +++++++++++++++--------- crates/networking/p2p/rlpx/p2p.rs | 5 ++- 2 files changed, 35 insertions(+), 18 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs 
b/crates/networking/p2p/rlpx/connection.rs index e7e2cc568e..3cd82c5a20 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -51,7 +51,7 @@ use tokio::{ task, time::{sleep, Instant}, }; -use tracing::{debug, error}; +use tracing::{debug, error, warn}; const CAP_P2P: (Capability, u8) = (Capability::P2p, 5); const CAP_ETH: (Capability, u8) = (Capability::Eth, 68); const CAP_SNAP: (Capability, u8) = (Capability::Snap, 1); @@ -238,24 +238,38 @@ impl RLPxConnection { self.send(hello_msg).await?; // Receive Hello message - if let Message::Hello(hello_message) = self.receive().await? { - self.capabilities = hello_message.capabilities; - - // Check if we have any capability in common - for cap in self.capabilities.clone() { - if SUPPORTED_CAPABILITIES.contains(&cap) { - return Ok(()); + match self.receive().await? { + Message::Hello(hello_message) => { + self.capabilities = hello_message.capabilities; + + // Check if we have any capability in common + for cap in self.capabilities.clone() { + if SUPPORTED_CAPABILITIES.contains(&cap) { + return Ok(()); + } } + // Return error if not + Err(RLPxError::HandshakeError( + "No matching capabilities".to_string(), + )) + } + Message::Disconnect(disconnect) => { + warn!( + "Peer replied to Hello with Disconnect with reason: {}", + disconnect.reason() + ); + // Fail if it is not a hello message + Err(RLPxError::HandshakeError( + "Expected Hello message".to_string(), + )) + } + m => { + warn!("Peer replied to Hello with {m:?}"); + // Fail if it is not a hello message + Err(RLPxError::HandshakeError( + "Expected Hello message".to_string(), + )) } - // Return error if not - Err(RLPxError::HandshakeError( - "No matching capabilities".to_string(), - )) - } else { - // Fail if it is not a hello message - Err(RLPxError::HandshakeError( - "Expected Hello message".to_string(), - )) } } diff --git a/crates/networking/p2p/rlpx/p2p.rs b/crates/networking/p2p/rlpx/p2p.rs index 5b957ca8fb..10d19e65f7 100644 --- a/crates/networking/p2p/rlpx/p2p.rs +++ b/crates/networking/p2p/rlpx/p2p.rs @@ -42,7 +42,10 @@ impl RLPDecode for Capability { "p2p" => Ok((Capability::P2p, rest)), "eth" => Ok((Capability::Eth, rest)), "snap" => Ok((Capability::Snap, rest)), - a => {info!("Unrecognized capability {a}"); Ok((Capability::Unknown, rest))}, + a => { + info!("Unrecognized capability {a}"); + Ok((Capability::Unknown, rest)) + } } } } From d97b7360ab1780b2b74b5e16d4b6a67e691e4e47 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 18:08:22 -0300 Subject: [PATCH 025/189] Debug --- crates/networking/p2p/rlpx/connection.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 3cd82c5a20..d97dd85181 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -493,10 +493,16 @@ impl RLPxConnection { debug!("Received Status"); backend::validate_status(msg_data, &self.storage)? 
} - _msg => { - return Err(RLPxError::HandshakeError( - "Expected a Status message".to_string(), - )) + Message::Disconnect(disconnect) => { + return Err(RLPxError::HandshakeError(format!( + "Peer disconnected due to: {}", + disconnect.reason() + ))) + } + msg => { + return Err(RLPxError::HandshakeError(format!( + "Expected a Status message, got: {msg:?}" + ))) } } } From 296fd6d76aa2f54bdbc0b7d3a7f6631ce9f3ac42 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 18:13:13 -0300 Subject: [PATCH 026/189] Show disconnect reason in HandshakeError --- crates/networking/p2p/rlpx/connection.rs | 49 ++++++++++++++++-------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 900098c5a4..16e2bc40e3 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -238,24 +238,33 @@ impl RLPxConnection { self.send(hello_msg).await?; // Receive Hello message - if let Message::Hello(hello_message) = self.receive().await? { - self.capabilities = hello_message.capabilities; - - // Check if we have any capability in common - for cap in self.capabilities.clone() { - if SUPPORTED_CAPABILITIES.contains(&cap) { - return Ok(()); + match self.receive().await? { + Message::Hello(hello_message) => { + self.capabilities = hello_message.capabilities; + + // Check if we have any capability in common + for cap in self.capabilities.clone() { + if SUPPORTED_CAPABILITIES.contains(&cap) { + return Ok(()); + } } + // Return error if not + Err(RLPxError::HandshakeError( + "No matching capabilities".to_string(), + )) + } + Message::Disconnect(disconnect) => { + return Err(RLPxError::HandshakeError(format!( + "Peer disconnected due to: {}", + disconnect.reason() + ))) + } + _ => { + // Fail if it is not a hello message + Err(RLPxError::HandshakeError( + "Expected Hello message".to_string(), + )) } - // Return error if not - Err(RLPxError::HandshakeError( - "No matching capabilities".to_string(), - )) - } else { - // Fail if it is not a hello message - Err(RLPxError::HandshakeError( - "Expected Hello message".to_string(), - )) } } @@ -479,7 +488,13 @@ impl RLPxConnection { debug!("Received Status"); backend::validate_status(msg_data, &self.storage)? 
} - _msg => { + Message::Disconnect(disconnect) => { + return Err(RLPxError::HandshakeError(format!( + "Peer disconnected due to: {}", + disconnect.reason() + ))) + } + _ => { return Err(RLPxError::HandshakeError( "Expected a Status message".to_string(), )) From 0c24974fa46565b8829f00e30ca2828152b78748 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 8 Jan 2025 18:20:18 -0300 Subject: [PATCH 027/189] clippy --- crates/networking/p2p/rlpx/connection.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 16e2bc40e3..8644ca5883 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -254,7 +254,7 @@ impl RLPxConnection { )) } Message::Disconnect(disconnect) => { - return Err(RLPxError::HandshakeError(format!( + Err(RLPxError::HandshakeError(format!( "Peer disconnected due to: {}", disconnect.reason() ))) From 847e8bbcf57d2afeb0357bd75f90065526e0afa7 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 10:42:46 -0300 Subject: [PATCH 028/189] fmt --- crates/networking/p2p/rlpx/connection.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 8644ca5883..59fd467902 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -253,12 +253,10 @@ impl RLPxConnection { "No matching capabilities".to_string(), )) } - Message::Disconnect(disconnect) => { - Err(RLPxError::HandshakeError(format!( - "Peer disconnected due to: {}", - disconnect.reason() - ))) - } + Message::Disconnect(disconnect) => Err(RLPxError::HandshakeError(format!( + "Peer disconnected due to: {}", + disconnect.reason() + ))), _ => { // Fail if it is not a hello message Err(RLPxError::HandshakeError( From 4fcb6f37df6c6bafd701d11d2b9bf113656b562f Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 11:01:02 -0300 Subject: [PATCH 029/189] Debug --- crates/networking/p2p/rlpx/handshake.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/handshake.rs b/crates/networking/p2p/rlpx/handshake.rs index 91ea09d414..e90e17aa30 100644 --- a/crates/networking/p2p/rlpx/handshake.rs +++ b/crates/networking/p2p/rlpx/handshake.rs @@ -13,6 +13,7 @@ use k256::{ PublicKey, SecretKey, }; use rand::Rng; +use tracing::warn; use super::error::RLPxError; @@ -116,7 +117,9 @@ fn decrypt_message( // Verify the MAC. let expected_d = sha256_hmac(&mac_key, &[iv, c], size_data); - assert_eq!(d, expected_d); + if d != expected_d { + warn!("Mismatched MAC") + } // Decrypt the message with the AES key. 
let mut stream_cipher = Aes128Ctr64BE::new_from_slices(aes_key, iv)?; From c5af64019eee2fe8e427576dca3cab96eed1dea0 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 11:51:54 -0300 Subject: [PATCH 030/189] Debug --- crates/common/types/fork_id.rs | 4 ++-- crates/networking/p2p/rlpx/eth/backend.rs | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/common/types/fork_id.rs b/crates/common/types/fork_id.rs index ec3945612f..55cd3be25d 100644 --- a/crates/common/types/fork_id.rs +++ b/crates/common/types/fork_id.rs @@ -12,8 +12,8 @@ use super::{BlockHash, BlockNumber, ChainConfig}; #[derive(Debug, PartialEq)] pub struct ForkId { - fork_hash: H32, - fork_next: BlockNumber, + pub fork_hash: H32, + pub fork_next: BlockNumber, } impl ForkId { diff --git a/crates/networking/p2p/rlpx/eth/backend.rs b/crates/networking/p2p/rlpx/eth/backend.rs index 98bb5c8e5d..fa505434a2 100644 --- a/crates/networking/p2p/rlpx/eth/backend.rs +++ b/crates/networking/p2p/rlpx/eth/backend.rs @@ -68,11 +68,11 @@ pub fn validate_status(msg_data: StatusMessage, storage: &Store) -> Result<(), R )); } // Check ForkID - // if msg_data.fork_id != fork_id { - // return Err(RLPxError::HandshakeError( - // "Fork Id does not match".to_string(), - // )); - // } + if msg_data.fork_id.fork_hash != fork_id.fork_hash { + return Err(RLPxError::HandshakeError( + "Fork Id does not match".to_string(), + )); + } Ok(()) } From 356dfb3b1893129560a7d55090b641bafbbf89fc Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 12:44:54 -0300 Subject: [PATCH 031/189] Debug --- crates/networking/p2p/rlpx/connection.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index d97dd85181..f8ce5de37b 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -307,7 +307,7 @@ impl RLPxConnection { // the function below will yield immediately but the select will not match and // ignore the returned value. Some(broadcasted_msg) = Self::maybe_wait_for_broadcaster(&mut broadcaster_receive) => { - self.handle_broadcast(broadcasted_msg?).await? 
+ self.handle_broadcast(broadcasted_msg.unwrap()).await.unwrap() } Some(message) = receiver.recv() => { self.send(message).await?; From 3c15f29c9043159faaf7d98762ee93967197665f Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 12:54:13 -0300 Subject: [PATCH 032/189] Debug --- crates/networking/p2p/rlpx/frame.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/frame.rs b/crates/networking/p2p/rlpx/frame.rs index cf5c15b463..4f72da1804 100644 --- a/crates/networking/p2p/rlpx/frame.rs +++ b/crates/networking/p2p/rlpx/frame.rs @@ -125,7 +125,9 @@ pub(crate) async fn read( .map_err(|_| RLPxError::CryptographyError("Invalid header mac".to_owned()))?, ); - assert_eq!(header_mac, expected_header_mac.0); + if header_mac != expected_header_mac.0 { + warn!("Mismatched mac"); + } let header_text = header_ciphertext; state.ingress_aes.apply_keystream(header_text); From dce4ea92dee6c3a18b7c3c29be599e6babc600e1 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 12:54:45 -0300 Subject: [PATCH 033/189] Debug --- crates/networking/p2p/rlpx/frame.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/rlpx/frame.rs b/crates/networking/p2p/rlpx/frame.rs index 4f72da1804..3f9abcb6e6 100644 --- a/crates/networking/p2p/rlpx/frame.rs +++ b/crates/networking/p2p/rlpx/frame.rs @@ -6,6 +6,7 @@ use ethrex_core::H128; use ethrex_rlp::encode::RLPEncode as _; use sha3::Digest as _; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use tracing::warn; use super::{connection::Established, error::RLPxError}; From fe5ea94eb5dd72841c357fd777ffbc1a11510c34 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 12:59:53 -0300 Subject: [PATCH 034/189] Remove diff --- crates/networking/p2p/rlpx/connection.rs | 47 +++++++++--------------- 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index f8ce5de37b..58057e07c8 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -238,41 +238,28 @@ impl RLPxConnection { self.send(hello_msg).await?; // Receive Hello message - match self.receive().await? { - Message::Hello(hello_message) => { - self.capabilities = hello_message.capabilities; - - // Check if we have any capability in common - for cap in self.capabilities.clone() { - if SUPPORTED_CAPABILITIES.contains(&cap) { - return Ok(()); - } + if let Message::Hello(hello_message) = self.receive().await? 
{ + self.capabilities = hello_message.capabilities; + + // Check if we have any capability in common + for cap in self.capabilities.clone() { + if SUPPORTED_CAPABILITIES.contains(&cap) { + return Ok(()); } - // Return error if not - Err(RLPxError::HandshakeError( - "No matching capabilities".to_string(), - )) - } - Message::Disconnect(disconnect) => { - warn!( - "Peer replied to Hello with Disconnect with reason: {}", - disconnect.reason() - ); - // Fail if it is not a hello message - Err(RLPxError::HandshakeError( - "Expected Hello message".to_string(), - )) - } - m => { - warn!("Peer replied to Hello with {m:?}"); - // Fail if it is not a hello message - Err(RLPxError::HandshakeError( - "Expected Hello message".to_string(), - )) } + // Return error if not + Err(RLPxError::HandshakeError( + "No matching capabilities".to_string(), + )) + } else { + // Fail if it is not a hello message + Err(RLPxError::HandshakeError( + "Expected Hello message".to_string(), + )) } } + async fn handle_peer_conn( &mut self, sender: mpsc::Sender, From 1aba97e602008b045152b354cfcf66f87fa65e7c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 13:10:01 -0300 Subject: [PATCH 035/189] Debug --- crates/networking/p2p/net.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 2de5c1c695..da2c15a544 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -401,7 +401,9 @@ async fn peers_revalidation( // first check that the peers we ping have responded for node_id in previously_pinged_peers { let mut table = table.lock().await; - let peer = table.get_by_node_id_mut(node_id).unwrap(); + let Some(peer) = table.get_by_node_id_mut(node_id) else { + continue; + }; if let Some(has_answered) = peer.revalidation { if has_answered { @@ -769,6 +771,7 @@ async fn serve_requests( let tcp_socket = TcpSocket::new_v4().unwrap(); tcp_socket.bind(tcp_addr).unwrap(); let listener = tcp_socket.listen(50).unwrap(); + table.lock().await.show_peer_stats(); loop { let (stream, _peer_addr) = listener.accept().await.unwrap(); From e2e7d5b80c7d068e0995828d9d03f85e7ff2c8b5 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 15:00:43 -0300 Subject: [PATCH 036/189] Debug --- crates/networking/p2p/rlpx/eth/backend.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/rlpx/eth/backend.rs b/crates/networking/p2p/rlpx/eth/backend.rs index fa505434a2..36f6498942 100644 --- a/crates/networking/p2p/rlpx/eth/backend.rs +++ b/crates/networking/p2p/rlpx/eth/backend.rs @@ -1,5 +1,6 @@ use ethrex_core::{types::ForkId, U256}; use ethrex_storage::Store; +use tracing::warn; use crate::rlpx::error::RLPxError; @@ -69,9 +70,7 @@ pub fn validate_status(msg_data: StatusMessage, storage: &Store) -> Result<(), R } // Check ForkID if msg_data.fork_id.fork_hash != fork_id.fork_hash { - return Err(RLPxError::HandshakeError( - "Fork Id does not match".to_string(), - )); + warn!("Fork Id Hash does not match") } Ok(()) From bfd18623f8efa6cd0080a3a51e5109be635b6847 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 15:14:37 -0300 Subject: [PATCH 037/189] Debug --- crates/networking/p2p/kademlia.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index cf7a4ee18a..33023762c0 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -336,7 +336,9 @@ impl KademliaTable { let 
total_peers = self.iter_peers().count(); let active_peers = self.filter_peers(&active_filter).count(); let snap_active_peers = self.filter_peers(&snap_active_filter).count(); - info!("Snap Peers: {snap_active_peers} / Active Peers {active_peers} / Total Peers: {total_peers}") + info!("Snap Peers: {snap_active_peers} / Active Peers {active_peers} / Total Peers: {total_peers}"); + let active_peers = self.filter_peers(&active_filter).map(|peer| peer.node.node_id.to_string()).collect::>().join(", "); + info!("Active Peers ID: {active_peers}"); } } From 104de49c2c5fc8d804f6f63ae01cd378d73d5f91 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 15:26:17 -0300 Subject: [PATCH 038/189] Try to cartch duplicate connection attempt error --- crates/networking/p2p/rlpx/connection.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index a59b1e75d8..95ac92428e 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -51,7 +51,7 @@ use tokio::{ task, time::{sleep, Instant}, }; -use tracing::{debug, error}; +use tracing::{debug, error, warn}; const CAP_P2P: (Capability, u8) = (Capability::P2p, 5); const CAP_ETH: (Capability, u8) = (Capability::Eth, 68); const CAP_SNAP: (Capability, u8) = (Capability::Snap, 1); @@ -253,10 +253,18 @@ impl RLPxConnection { "No matching capabilities".to_string(), )) } - Message::Disconnect(disconnect) => Err(RLPxError::HandshakeError(format!( + Message::Disconnect(disconnect) => { + // Check if the disconnect is due to already being connected: + if disconnect.reason.is_some_and(|r| r ==0x05) { + warn!("Tried to connect to already connected peer"); + // Return Ok so we don;t discard a good peer + return Ok(()) + } + Err(RLPxError::HandshakeError(format!( "Peer disconnected due to: {}", disconnect.reason() - ))), + ))) + } _ => { // Fail if it is not a hello message Err(RLPxError::HandshakeError( From 184562a5ba6c61f3f904e9876c5c62811946f159 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 16:07:56 -0300 Subject: [PATCH 039/189] Try to cartch duplicate connection attempt error --- crates/networking/p2p/net.rs | 2 ++ crates/networking/p2p/rlpx/connection.rs | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index da2c15a544..b29caa25a3 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -793,6 +793,7 @@ async fn handle_peer_as_receiver( connection_broadcast: broadcast::Sender<(tokio::task::Id, Arc)>, ) { let mut conn = RLPxConnection::receiver(signer, stream, storage, connection_broadcast); + info!("Starting Peer as Receiver"); conn.start_peer(table).await; } @@ -810,6 +811,7 @@ async fn handle_peer_as_initiator( .connect(SocketAddr::new(node.ip, node.tcp_port)) .await .unwrap(); + info!("Starting Peer as Initiator"); match RLPxConnection::initiator(signer, msg, stream, storage, connection_broadcast).await { Ok(mut conn) => conn.start_peer(table).await, Err(e) => { diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 95ac92428e..1668551e65 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -257,8 +257,6 @@ impl RLPxConnection { // Check if the disconnect is due to already being connected: if disconnect.reason.is_some_and(|r| r ==0x05) { warn!("Tried to connect to already connected peer"); 
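The magic `0x05` above is the devp2p disconnect reason "Already Connected". Since the raw byte is now matched in the handshake path, a named enum would keep the intent readable; a sketch, with values taken from the public devp2p disconnect-reason table (low codes only):

    /// Subset of the devp2p disconnect reasons.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    #[repr(u8)]
    enum DisconnectReason {
        Requested = 0x00,
        TcpError = 0x01,
        ProtocolBreach = 0x02,
        UselessPeer = 0x03,
        TooManyPeers = 0x04,
        AlreadyConnected = 0x05,
        IncompatibleVersion = 0x06,
    }

    fn is_already_connected(reason: Option<u8>) -> bool {
        reason == Some(DisconnectReason::AlreadyConnected as u8)
    }

Treating "Already Connected" as benign is defensible — the remote peer is healthy, the dial was merely redundant — though returning `Ok(())` from the handshake would leave the task running without a usable connection, which is presumably why the early return is removed again below.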
- // Return Ok so we don;t discard a good peer - return Ok(()) } Err(RLPxError::HandshakeError(format!( "Peer disconnected due to: {}", From 2232c0220921d27022e568e86a05f63659b4dc5c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 16:20:17 -0300 Subject: [PATCH 040/189] Try to cartch duplicate connection attempt error --- crates/networking/p2p/net.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index b29caa25a3..a9f51505eb 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -218,6 +218,7 @@ async fn discover_peers_server( continue; } if peer.last_ping_hash.unwrap() == msg.ping_hash { + info!("Peer {} answered ping with pong", peer.node.node_id); table.lock().await.pong_answered(peer.node.node_id); let mut msg_buf = vec![0; read - 32]; @@ -396,7 +397,7 @@ async fn peers_revalidation( loop { interval.tick().await; - debug!("Running peer revalidation"); + info!("Running peer revalidation"); // first check that the peers we ping have responded for node_id in previously_pinged_peers { @@ -447,10 +448,10 @@ async fn peers_revalidation( table.update_peer_ping_with_revalidation(peer.node.node_id, ping_hash); previously_pinged_peers.insert(peer.node.node_id); - debug!("Pinging peer {:?} to re-validate!", peer.node.node_id); + info!("Pinging peer {} to re-validate!", peer.node.node_id); } - debug!("Peer revalidation finished"); + info!("Peer revalidation finished"); } } From d1c24d964e602059214233947947acb3d5dcacfd Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 16:54:27 -0300 Subject: [PATCH 041/189] Dont initiate a second connection when revalidating peer --- crates/networking/p2p/net.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index a9f51505eb..af4e4000d2 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -218,8 +218,11 @@ async fn discover_peers_server( continue; } if peer.last_ping_hash.unwrap() == msg.ping_hash { - info!("Peer {} answered ping with pong", peer.node.node_id); + debug!("Peer {} answered ping with pong", peer.node.node_id); table.lock().await.pong_answered(peer.node.node_id); + if peer.channels.is_some() { + continue; + } let mut msg_buf = vec![0; read - 32]; buf[32..read].clone_into(&mut msg_buf); From 74ea3b375ffbc041f2872b346c99dfee1ba39bf4 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 9 Jan 2025 18:49:00 -0300 Subject: [PATCH 042/189] Keep track of snap state --- crates/networking/p2p/sync.rs | 24 +++++++++ crates/storage/store/engines/api.rs | 14 +++++ crates/storage/store/engines/in_memory.rs | 41 +++++++++++++++ crates/storage/store/engines/libmdbx.rs | 63 ++++++++++++++++++++++- crates/storage/store/engines/utils.rs | 29 ++++++++++- crates/storage/store/storage.rs | 24 +++++++++ 6 files changed, 193 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 9c9eb85ac4..48a6bc6905 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -100,6 +100,23 @@ impl SyncManager { // We will begin from the current head so that we download the earliest state first // This step is not parallelized let mut all_block_hashes = vec![]; + // Check if we have some blocks downloaded from a previous sync attempt + if let Some(last_header) = store.get_latest_downloaded_header()? 
{ + // We might have more headers than bodies downloaded so we should queue missing bodies for download + let last_body = match store.get_latest_downloaded_body()? { + Some(hash) => hash, + None => current_head, + }; + if last_body != last_header { + let mut parent = last_header; + while parent != last_body { + all_block_hashes.insert(0, parent); + parent = store.get_block_header_by_hash(parent)?.unwrap().parent_hash; + } + } + // Set latest downloaded header as current head for header fetching + current_head = last_header; + } loop { let peer = self .peers @@ -126,6 +143,8 @@ impl SyncManager { // Update current fetch head if needed if !sync_head_found { current_head = *block_hashes.last().unwrap(); + // Update snap state + store.set_latest_downloaded_header(current_head)?; } // Store headers and save hashes for full block retrieval all_block_hashes.extend_from_slice(&block_hashes[..]); @@ -206,6 +225,9 @@ impl SyncManager { download_and_run_blocks(all_block_hashes, self.peers.clone(), store.clone()).await? } } + // Finished a sync cycle without aborting halfway, clear current state (TODO: write pivot here too) + store.clear_latest_downloaded_header(); + store.clear_latest_downloaded_body(); Ok(()) } } @@ -263,6 +285,8 @@ async fn store_block_bodies( debug!(" Received {} Block Bodies", block_bodies.len()); // Track which bodies we have already fetched let current_block_hashes = block_hashes.drain(..block_bodies.len()); + // Update snap state + store.set_latest_downloaded_body(*current_block_hashes.as_ref().last().unwrap())?; // Add bodies to storage for (hash, body) in current_block_hashes.zip(block_bodies.into_iter()) { store.add_block_body(hash, body)?; diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index 0c372609e7..4901be6d1a 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -249,4 +249,18 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { ) -> Result<(), StoreError>; fn get_receipts_for_block(&self, block_hash: &BlockHash) -> Result, StoreError>; + + // Snap State methods + + fn set_latest_downloaded_header(&self, block_hash: BlockHash) -> Result<(), StoreError>; + + fn get_latest_downloaded_header(&self) -> Result, StoreError>; + + fn clear_latest_downloaded_header(&self) -> Result<(), StoreError>; + + fn set_latest_downloaded_body(&self, block_hash: BlockHash) -> Result<(), StoreError>; + + fn get_latest_downloaded_body(&self) -> Result, StoreError>; + + fn clear_latest_downloaded_body(&self) -> Result<(), StoreError>; } diff --git a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index f5c9b1f7fe..ff823b05f7 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -38,6 +38,8 @@ struct StoreInner { // Stores local blocks by payload id payloads: HashMap, pending_blocks: HashMap, + // Stores current Snap Sate + snap_state: SnapState, } #[derive(Default, Debug)] @@ -52,6 +54,17 @@ struct ChainData { pending_block_number: Option, } +// Keeps track of the state left by the latest snap attempt +#[derive(Default, Debug)] +pub struct SnapState { + /// The last block number used as a pivot for snap-sync + last_snap_pivot: u64, + /// Latest downloaded block header's hash from a previously aborted sync + last_downloaded_header_hash: Option, + /// Latest downloaded block body's hash from a previously aborted sync + last_downloaded_body_hash: Option, +} + impl Store { pub fn new() -> Self { Self::default() @@ 
@@ -423,6 +436,34 @@ impl StoreEngine for Store {
.insert(payload_id, (block, block_value, blobs_bundle, completed));
Ok(())
}
+
+ fn set_latest_downloaded_header(&self, block_hash: BlockHash) -> Result<(), StoreError> {
+ self.inner().snap_state.last_downloaded_header_hash = Some(block_hash);
+ Ok(())
+ }
+
+ fn get_latest_downloaded_header(&self) -> Result<Option<BlockHash>, StoreError> {
+ Ok(self.inner().snap_state.last_downloaded_header_hash)
+ }
+
+ fn clear_latest_downloaded_header(&self) -> Result<(), StoreError> {
+ self.inner().snap_state.last_downloaded_header_hash = None;
+ Ok(())
+ }
+
+ fn set_latest_downloaded_body(&self, block_hash: BlockHash) -> Result<(), StoreError> {
+ self.inner().snap_state.last_downloaded_body_hash = Some(block_hash);
+ Ok(())
+ }
+
+ fn get_latest_downloaded_body(&self) -> Result<Option<BlockHash>, StoreError> {
+ Ok(self.inner().snap_state.last_downloaded_body_hash)
+ }
+
+ fn clear_latest_downloaded_body(&self) -> Result<(), StoreError> {
+ self.inner().snap_state.last_downloaded_body_hash = None;
+ Ok(())
+ }
}

impl Debug for Store {

diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs
index 5509008e19..423a6409c6 100644
--- a/crates/storage/store/engines/libmdbx.rs
+++ b/crates/storage/store/engines/libmdbx.rs
@@ -1,5 +1,5 @@
use super::api::StoreEngine;
-use super::utils::ChainDataIndex;
+use super::utils::{ChainDataIndex, SnapStateIndex};
use crate::error::StoreError;
use crate::rlp::{
AccountCodeHashRLP, AccountCodeRLP, BlockBodyRLP, BlockHashRLP, BlockHeaderRLP, BlockRLP,
@@ -72,6 +72,17 @@ impl Store {
txn.get::<T>(key).map_err(StoreError::LibmdbxError)
}

+ // Helper method to remove a value from a libmdbx table
+ fn delete<T: Table>(&self, key: T::Key) -> Result<(), StoreError> {
+ let txn = self
+ .db
+ .begin_readwrite()
+ .map_err(StoreError::LibmdbxError)?;
+ txn.delete::<T>(key, None)
+ .map_err(StoreError::LibmdbxError)?;
+ txn.commit().map_err(StoreError::LibmdbxError)
+ }
+
fn get_block_hash_by_block_number(
&self,
number: BlockNumber,
@@ -514,6 +525,42 @@ impl StoreEngine for Store {
Ok(receipts.into_iter().map(|receipt| receipt.to()).collect())
}
+
+ fn set_latest_downloaded_header(&self, block_hash: BlockHash) -> Result<(), StoreError> {
+ self.write::<SnapState>(
+ SnapStateIndex::LatestDownloadedHeader,
+ block_hash.encode_to_vec(),
+ )
+ }
+
+ fn get_latest_downloaded_header(&self) -> Result<Option<BlockHash>, StoreError> {
+ self.read::<SnapState>(SnapStateIndex::LatestDownloadedHeader)?
+ .map(|ref h| BlockHash::decode(h))
+ .transpose()
+ .map_err(StoreError::RLPDecode)
+ }
+
+ fn clear_latest_downloaded_header(&self) -> Result<(), StoreError> {
+ self.delete::<SnapState>(SnapStateIndex::LatestDownloadedHeader)
+ }
+
+ fn set_latest_downloaded_body(&self, block_hash: BlockHash) -> Result<(), StoreError> {
+ self.write::<SnapState>(
+ SnapStateIndex::LatestDownloadedBody,
+ block_hash.encode_to_vec(),
+ )
+ }
+
+ fn get_latest_downloaded_body(&self) -> Result<Option<BlockHash>, StoreError> {
+ self.read::<SnapState>(SnapStateIndex::LatestDownloadedBody)?
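+ // (The hash is stored as its RLP encoding, so it is decoded back into a
+ // BlockHash here; a failed decode surfaces as StoreError::RLPDecode.)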
+ .map(|ref h| BlockHash::decode(h))
+ .transpose()
+ .map_err(StoreError::RLPDecode)
+ }
+
+ fn clear_latest_downloaded_body(&self) -> Result<(), StoreError> {
+ self.delete::<SnapState>(SnapStateIndex::LatestDownloadedBody)
+ }
}

impl Debug for Store {
@@ -575,6 +622,12 @@ table!(
( ChainData ) ChainDataIndex => Vec<u8>
);

+table!(
+ /// Stores snap state, each value is unique and stored as its rlp encoding
+ /// See [SnapStateIndex] for available values
+ ( SnapState ) SnapStateIndex => Vec<u8>
+);
+
// Trie storages

table!(
@@ -659,6 +712,13 @@ impl Encodable for ChainDataIndex {
}
}

+impl Encodable for SnapStateIndex {
+ type Encoded = [u8; 4];
+
+ fn encode(self) -> Self::Encoded {
+ (self as u32).encode()
+ }
+}

/// Initializes a new database with the provided path. If the path is `None`, the database
/// will be temporary.
pub fn init_db(path: Option<impl AsRef<Path>>) -> Database {
@@ -677,6 +737,7 @@ pub fn init_db(path: Option<impl AsRef<Path>>) -> Database {
table_info!(CanonicalBlockHashes),
table_info!(Payloads),
table_info!(PendingBlocks),
+ table_info!(SnapState),
]
.into_iter()
.collect();

diff --git a/crates/storage/store/engines/utils.rs b/crates/storage/store/engines/utils.rs
index 60d3e66e3e..3af5984acd 100644
--- a/crates/storage/store/engines/utils.rs
+++ b/crates/storage/store/engines/utils.rs
@@ -1,5 +1,5 @@
/// Represents the key for each unique value of the chain data stored in the db
-// (TODO: Remove this comment once full) Will store chain-specific data such as chain id and latest finalized/pending/safe block number
+// Stores chain-specific data such as chain id and latest finalized/pending/safe block number
#[derive(Debug, Copy, Clone)]
pub enum ChainDataIndex {
ChainConfig = 0,
@@ -34,3 +34,30 @@ impl From<u8> for ChainDataIndex {
}
}
}
+
+/// Represents the key for each unique value of the snap state stored in the db
+// Stores the state left by a previous aborted snap sync: the last pivot and the latest downloaded header/body hashes
+#[derive(Debug, Copy, Clone)]
+pub enum SnapStateIndex {
+ // Pivot used by the last completed snap sync cycle
+ LastPivot = 0,
+ // Hash of the last downloaded header in a previous sync that was aborted
+ LatestDownloadedHeader = 1,
+ // Hash of the last downloaded body in a previous sync that was aborted
+ LatestDownloadedBody = 2,
+}
+
+impl From<u8> for SnapStateIndex {
+ fn from(value: u8) -> Self {
+ match value {
+ x if x == SnapStateIndex::LastPivot as u8 => SnapStateIndex::LastPivot,
+ x if x == SnapStateIndex::LatestDownloadedHeader as u8 => {
+ SnapStateIndex::LatestDownloadedHeader
+ }
+ x if x == SnapStateIndex::LatestDownloadedBody as u8 => {
+ SnapStateIndex::LatestDownloadedBody
+ }
+ _ => panic!("Invalid value when casting to SnapStateIndex: {}", value),
+ }
+ }
+}

diff --git a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs
index f2785c1020..39e1422b74 100644
--- a/crates/storage/store/storage.rs
+++ b/crates/storage/store/storage.rs
@@ -999,6 +999,30 @@ impl Store {
.get_node(node_hash.into())?
.is_some())
}
+
+ pub fn set_latest_downloaded_header(&self, block_hash: BlockHash) -> Result<(), StoreError> {
+ self.engine.set_latest_downloaded_header(block_hash)
+ }
+
+ pub fn get_latest_downloaded_header(&self) -> Result<Option<BlockHash>, StoreError> {
+ self.engine.get_latest_downloaded_header()
+ }
+
+ pub fn clear_latest_downloaded_header(&self) -> Result<(), StoreError> {
+ self.engine.clear_latest_downloaded_header()
+ }
+
+ pub fn set_latest_downloaded_body(&self, block_hash: BlockHash) -> Result<(), StoreError> {
+ self.engine.set_latest_downloaded_body(block_hash)
+ }
+
+ pub fn get_latest_downloaded_body(&self) -> Result<Option<BlockHash>, StoreError> {
+ self.engine.get_latest_downloaded_body()
+ }
+
+ pub fn clear_latest_downloaded_body(&self) -> Result<(), StoreError> {
+ self.engine.clear_latest_downloaded_body()
+ }
}

pub fn hash_address(address: &Address) -> Vec<u8> {

From 519b9dad038c52cd5c8d435b92d87438cffb9226 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 10 Jan 2025 10:55:30 -0300
Subject: [PATCH 043/189] Keep track of snap state

---
 crates/networking/p2p/net.rs | 2 --
 crates/networking/p2p/sync.rs | 14 ++++++++++++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs
index af4e4000d2..7a3959639f 100644
--- a/crates/networking/p2p/net.rs
+++ b/crates/networking/p2p/net.rs
@@ -797,7 +797,6 @@ async fn handle_peer_as_receiver(
connection_broadcast: broadcast::Sender<(tokio::task::Id, Arc<Message>)>,
) {
let mut conn = RLPxConnection::receiver(signer, stream, storage, connection_broadcast);
- info!("Starting Peer as Receiver");
conn.start_peer(table).await;
}

@@ -815,7 +814,6 @@ async fn handle_peer_as_initiator(
.connect(SocketAddr::new(node.ip, node.tcp_port))
.await
.unwrap();
- info!("Starting Peer as Initiator");
match RLPxConnection::initiator(signer, msg, stream, storage, connection_broadcast).await {
Ok(mut conn) => conn.start_peer(table).await,
Err(e) => {

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 87b6350fc8..d2214cd29d 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -102,11 +102,21 @@ impl SyncManager {
let mut all_block_hashes = vec![];
// Check if we have some blocks downloaded from a previous sync attempt
if let Some(last_header) = store.get_latest_downloaded_header()? {
+ // Debug Code Block
+ {
+ let last_downloaded_header = store.get_block_header_by_hash(last_header)?.unwrap().number;
+ info!("Resuming header download from last downloaded header with number: {last_downloaded_header}");
+ }
// We might have more headers than bodies downloaded so we should queue missing bodies for download
let last_body = match store.get_latest_downloaded_body()?
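// (When no body checkpoint exists, the pre-sync head is used as the fallback,
// so every header downloaded past it gets its body queued for retrieval.)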
{ Some(hash) => hash, None => current_head, }; + // Debug Code Block + { + let last_downloaded_body = store.get_block_header_by_hash(last_body)?.unwrap().number; + info!("Resuming body download from last downloaded body with number: {last_downloaded_body}"); + } if last_body != last_header { let mut parent = last_header; while parent != last_body { @@ -226,8 +236,8 @@ impl SyncManager { } } // Finished a sync cycle without aborting halfway, clear current state (TODO: write pivot here too) - store.clear_latest_downloaded_header(); - store.clear_latest_downloaded_body(); + store.clear_latest_downloaded_header()?; + store.clear_latest_downloaded_body()?; Ok(()) } } From 8972955e0de16d3719935ed8fed14ecc05571979 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 10 Jan 2025 10:58:28 -0300 Subject: [PATCH 044/189] Keep track of snap state --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index d2214cd29d..72164bb9d7 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -140,7 +140,7 @@ impl SyncManager { .request_block_headers(current_head, BlockRequestOrder::OldToNew) .await { - info!("Received {} block headers", block_headers.len()); + info!("Received {} block headers| Last Number: {}", block_headers.len(), block_headers.last().as_ref().unwrap().number); let mut block_hashes = block_headers .iter() .map(|header| header.compute_block_hash()) From 9b0bf77004d2bca72888b8434025cbb7799b3a0c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 10 Jan 2025 14:47:29 -0300 Subject: [PATCH 045/189] Debug --- crates/networking/p2p/net.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 7a3959639f..d0b8c6089c 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -221,6 +221,7 @@ async fn discover_peers_server( debug!("Peer {} answered ping with pong", peer.node.node_id); table.lock().await.pong_answered(peer.node.node_id); if peer.channels.is_some() { + info!("Skip trying to connect to already connected peer"); continue; } From 4020234e5ba380d8c070eabe16b9ee1c75de921e Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 10 Jan 2025 15:00:06 -0300 Subject: [PATCH 046/189] Debug --- crates/networking/p2p/net.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index d0b8c6089c..c25bd36307 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -218,10 +218,10 @@ async fn discover_peers_server( continue; } if peer.last_ping_hash.unwrap() == msg.ping_hash { - debug!("Peer {} answered ping with pong", peer.node.node_id); + info!("Peer {} answered ping with pong", peer.node.node_id); table.lock().await.pong_answered(peer.node.node_id); if peer.channels.is_some() { - info!("Skip trying to connect to already connected peer"); + info!("Skip trying to connect to already connected peer {}", peer.node.node_id); continue; } @@ -412,8 +412,10 @@ async fn peers_revalidation( if let Some(has_answered) = peer.revalidation { if has_answered { + info!("Peer {node_id} answered revalidation ping"); peer.increment_liveness(); } else { + info!("Peer {node_id} hasn't answered revalidation ping"); peer.decrement_liveness(); } } @@ -421,9 +423,10 @@ async fn peers_revalidation( peer.revalidation = None; if peer.liveness == 0 { + info!("Replacing Peer {node_id} due to revalidation"); let new_peer = 
table.replace_peer(node_id);
if let Some(new_peer) = new_peer {
- let ping_hash = ping(
+ let ping_hash: Option<H256> = ping(
&udp_socket,
udp_addr,
SocketAddr::new(new_peer.node.ip, new_peer.node.udp_port),

From 75ed3ca39d935817f217902205d7e7047c0f74a7 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 10 Jan 2025 15:13:57 -0300
Subject: [PATCH 047/189] Debug

---
 crates/networking/p2p/sync.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 72164bb9d7..8687836de7 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -121,7 +121,9 @@ impl SyncManager {
let mut parent = last_header;
while parent != last_body {
all_block_hashes.insert(0, parent);
- parent = store.get_block_header_by_hash(parent)?.unwrap().parent_hash;
+ let parent_header = store.get_block_header_by_hash(parent)?.unwrap();
+ println!("Queuing body {} for download", parent_header.number);
+ parent = parent_header.parent_hash;
}
}

From d3cb204d5997ca0edda838b8a986b869055f3a6d Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 10 Jan 2025 15:23:23 -0300
Subject: [PATCH 048/189] Disregard bodies

---
 crates/networking/p2p/sync.rs | 36 +++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 8687836de7..a6e736ba3e 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -108,24 +108,24 @@ impl SyncManager {
info!("Resuming header download from last downloaded header with number: {last_downloaded_header}");
}
// We might have more headers than bodies downloaded so we should queue missing bodies for download
- let last_body = match store.get_latest_downloaded_body()? {
- Some(hash) => hash,
- None => current_head,
- };
+ // let last_body = match store.get_latest_downloaded_body()?
{ + // Some(hash) => hash, + // None => current_head, + // }; // Debug Code Block - { - let last_downloaded_body = store.get_block_header_by_hash(last_body)?.unwrap().number; - info!("Resuming body download from last downloaded body with number: {last_downloaded_body}"); - } - if last_body != last_header { - let mut parent = last_header; - while parent != last_body { - all_block_hashes.insert(0, parent); - let parent_header = store.get_block_header_by_hash(parent)?.unwrap(); - println!("Queuing body {} for download", parent_header.number); - parent = parent_header.parent_hash; - } - } + // { + // let last_downloaded_body = store.get_block_header_by_hash(last_body)?.unwrap().number; + // info!("Resuming body download from last downloaded body with number: {last_downloaded_body}"); + // } + // if last_body != last_header { + // let mut parent = last_header; + // while parent != last_body { + // all_block_hashes.insert(0, parent); + // let parent_header = store.get_block_header_by_hash(parent)?.unwrap(); + // println!("Queuing body {} for download", parent_header.number); + // parent = parent_header.parent_hash; + // } + // } // Set latest downloaded header as current head for header fetching current_head = last_header; } @@ -174,7 +174,7 @@ impl SyncManager { // snap-sync: launch tasks to fetch blocks and state in parallel // - Fetch each block's body and its receipt via eth p2p requests // - Fetch the pivot block's state via snap p2p requests - // - Execute blocks after the pivote (like in full-sync) + // - Execute blocks after the pivot (like in full-sync) let store_bodies_handle = tokio::spawn(store_block_bodies( all_block_hashes.clone(), self.peers.clone(), From fc7249e104d051f56c3bb7b4f7bd25888fda82b0 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 10 Jan 2025 16:16:11 -0300 Subject: [PATCH 049/189] Download only bodies past pivot --- crates/networking/p2p/net.rs | 10 +++++----- crates/networking/p2p/sync.rs | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index c25bd36307..4b66b8ab37 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -218,10 +218,10 @@ async fn discover_peers_server( continue; } if peer.last_ping_hash.unwrap() == msg.ping_hash { - info!("Peer {} answered ping with pong", peer.node.node_id); + debug!("Peer {} answered ping with pong", peer.node.node_id); table.lock().await.pong_answered(peer.node.node_id); if peer.channels.is_some() { - info!("Skip trying to connect to already connected peer {}", peer.node.node_id); + debug!("Skip trying to connect to already connected peer {}", peer.node.node_id); continue; } @@ -401,7 +401,7 @@ async fn peers_revalidation( loop { interval.tick().await; - info!("Running peer revalidation"); + debug!("Running peer revalidation"); // first check that the peers we ping have responded for node_id in previously_pinged_peers { @@ -412,10 +412,10 @@ async fn peers_revalidation( if let Some(has_answered) = peer.revalidation { if has_answered { - info!("Peer {node_id} answered revalidation ping"); + debug!("Peer {node_id} answered revalidation ping"); peer.increment_liveness(); } else { - info!("Peer {node_id} hasn't answered revalidation ping"); + debug!("Peer {node_id} hasn't answered revalidation ping"); peer.decrement_liveness(); } } diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index a6e736ba3e..648178d680 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ 
-175,16 +175,16 @@ impl SyncManager {
// - Fetch each block's body and its receipt via eth p2p requests
// - Fetch the pivot block's state via snap p2p requests
// - Execute blocks after the pivot (like in full-sync)
- let store_bodies_handle = tokio::spawn(store_block_bodies(
- all_block_hashes.clone(),
- self.peers.clone(),
- store.clone(),
- ));
let mut pivot_idx = if all_block_hashes.len() > MIN_FULL_BLOCKS {
all_block_hashes.len() - MIN_FULL_BLOCKS
} else {
all_block_hashes.len() - 1
};
+ let store_bodies_handle = tokio::spawn(store_block_bodies(
+ all_block_hashes[pivot_idx..].to_vec(),
+ self.peers.clone(),
+ store.clone(),
+ ));
let mut pivot_header = store
.get_block_header_by_hash(all_block_hashes[pivot_idx])?
.ok_or(SyncError::CorruptDB)?;

From b38351af107945aa4e299a4ecb43615365b5bb0c Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 10 Jan 2025 16:24:34 -0300
Subject: [PATCH 050/189] Mute revalidation

---
 crates/networking/p2p/net.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs
index 4b66b8ab37..1caadd3b9f 100644
--- a/crates/networking/p2p/net.rs
+++ b/crates/networking/p2p/net.rs
@@ -423,7 +423,7 @@ async fn peers_revalidation(
peer.revalidation = None;

if peer.liveness == 0 {
- info!("Replacing Peer {node_id} due to revalidation");
+ debug!("Replacing Peer {node_id} due to revalidation");
let new_peer = table.replace_peer(node_id);
if let Some(new_peer) = new_peer {
let ping_hash: Option<H256> = ping(
@@ -455,10 +455,10 @@ async fn peers_revalidation(
table.update_peer_ping_with_revalidation(peer.node.node_id, ping_hash);
previously_pinged_peers.insert(peer.node.node_id);

- info!("Pinging peer {} to re-validate!", peer.node.node_id);
+ debug!("Pinging peer {} to re-validate!", peer.node.node_id);
}

- info!("Peer revalidation finished");
+ debug!("Peer revalidation finished");
}
}

From 8b10f5d9de394a72c0b409eac162c00f1e9ce205 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 10 Jan 2025 16:26:15 -0300
Subject: [PATCH 051/189] Debug

---
 crates/networking/p2p/sync.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 648178d680..2102906105 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -292,7 +292,7 @@ async fn store_block_bodies(
) -> Result<(), SyncError> {
loop {
let peer = peers.lock().await.get_peer_channels(Capability::Eth).await;
- info!("Requesting Block Headers ");
+ info!("Requesting Block Bodies ");
if let Some(block_bodies) = peer.request_block_bodies(block_hashes.clone()).await {
info!(" Received {} Block Bodies", block_bodies.len());
// Track which bodies we have already fetched
@@ -321,7 +321,7 @@ async fn store_receipts(
) -> Result<(), SyncError> {
loop {
let peer = peers.lock().await.get_peer_channels(Capability::Eth).await;
- info!("Requesting Block Headers ");
+ info!("Requesting Receipts");
if let Some(receipts) = peer.request_receipts(block_hashes.clone()).await {
info!(" Received {} Receipts", receipts.len());
// Track which blocks we have already fetched receipts for
@@ -344,6 +344,7 @@ async fn rebuild_state_trie(
peers: Arc<Mutex<KademliaTable>>,
store: Store,
) -> Result<bool, SyncError> {
+ info!("Rebuilding State Trie");
// Spawn storage & bytecode fetchers
let (bytecode_sender, bytecode_receiver) = mpsc::channel::<Vec<H256>>(500);
let (storage_sender, storage_receiver) = mpsc::channel::<Vec<(H256, H256)>>(500);

From 16116c46b313245ccf891b21155866142e371556 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri,
10 Jan 2025 18:06:24 -0300 Subject: [PATCH 052/189] Debug --- crates/networking/p2p/peer_channels.rs | 15 +++++++++++---- crates/networking/p2p/rlpx/frame.rs | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index f352bd834f..b6ae5415b6 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -9,6 +9,7 @@ use ethrex_rlp::encode::RLPEncode; use ethrex_trie::Nibbles; use ethrex_trie::{verify_range, Node}; use tokio::sync::{mpsc, Mutex}; +use tracing::warn; use crate::{ rlpx::{ @@ -91,13 +92,19 @@ impl PeerChannels { return Some(block_headers) } // Ignore replies that don't match the expected id (such as late responses) - Some(_) => continue, - None => return None, + Some(a) => {warn!("UNEXPECTED RESPONSE: {a:?}"); continue}, + None => {warn!("NO RESPONSE");return None}, } } }) - .await - .ok()??; + .await; + if block_headers.is_err() { + warn!("PEER TIMEOUT"); + } + let block_headers = block_headers.ok()??; + if block_headers.is_empty() { + warn!("EMPTY BLOCK HEADERS RESPONSE"); + } (!block_headers.is_empty()).then_some(block_headers) } diff --git a/crates/networking/p2p/rlpx/frame.rs b/crates/networking/p2p/rlpx/frame.rs index 3f9abcb6e6..3f2771319f 100644 --- a/crates/networking/p2p/rlpx/frame.rs +++ b/crates/networking/p2p/rlpx/frame.rs @@ -135,7 +135,7 @@ pub(crate) async fn read( // header-data = [capability-id, context-id] // Both are unused, and always zero - assert_eq!(&header_text[3..6], &(0_u8, 0_u8).encode_to_vec()); + //assert_eq!(&header_text[3..6], &(0_u8, 0_u8).encode_to_vec()); let frame_size: usize = u32::from_be_bytes([0, header_text[0], header_text[1], header_text[2]]) .try_into() From c72de56096177c1985502a36a093936c2a7ccc1a Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 13 Jan 2025 11:58:48 -0300 Subject: [PATCH 053/189] Do not switch to full sync on aborted sync --- crates/networking/p2p/sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 2102906105..1528e04c59 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -79,8 +79,6 @@ impl SyncManager { "Sync finished, time elapsed: {} secs", start_time.elapsed().as_secs() ); - // Next sync will be full-sync - self.sync_mode = SyncMode::Full; } Err(error) => warn!( "Sync failed due to {error}, time elapsed: {} secs ", @@ -240,6 +238,8 @@ impl SyncManager { // Finished a sync cycle without aborting halfway, clear current state (TODO: write pivot here too) store.clear_latest_downloaded_header()?; store.clear_latest_downloaded_body()?; + // Next sync will be full-sync + self.sync_mode = SyncMode::Full; Ok(()) } } From c3ec08eee678084f02c092dbc3c22ddc1d8686af Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 13 Jan 2025 12:06:38 -0300 Subject: [PATCH 054/189] Debug --- crates/networking/p2p/kademlia.rs | 6 +++++- crates/networking/p2p/net.rs | 5 ++++- crates/networking/p2p/peer_channels.rs | 13 ++++++++++--- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 33023762c0..63af382410 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -337,7 +337,11 @@ impl KademliaTable { let active_peers = self.filter_peers(&active_filter).count(); let snap_active_peers = self.filter_peers(&snap_active_filter).count(); info!("Snap Peers: {snap_active_peers} / 
Active Peers {active_peers} / Total Peers: {total_peers}");
- let active_peers = self.filter_peers(&active_filter).map(|peer| peer.node.node_id.to_string()).collect::<Vec<_>>().join(", ");
+ let active_peers = self
+ .filter_peers(&active_filter)
+ .map(|peer| peer.node.node_id.to_string())
+ .collect::<Vec<_>>()
+ .join(", ");
info!("Active Peers ID: {active_peers}");
}
}

diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs
index 1caadd3b9f..30cf359ca8 100644
--- a/crates/networking/p2p/net.rs
+++ b/crates/networking/p2p/net.rs
@@ -221,7 +221,10 @@ async fn discover_peers_server(
debug!("Peer {} answered ping with pong", peer.node.node_id);
table.lock().await.pong_answered(peer.node.node_id);
if peer.channels.is_some() {
- debug!("Skip trying to connect to already connected peer {}", peer.node.node_id);
+ debug!(
+ "Skip trying to connect to already connected peer {}",
+ peer.node.node_id
+ );
continue;
}

diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs
index b6ae5415b6..1af4ef1c81 100644
--- a/crates/networking/p2p/peer_channels.rs
+++ b/crates/networking/p2p/peer_channels.rs
@@ -9,7 +9,7 @@ use ethrex_rlp::encode::RLPEncode;
use ethrex_trie::Nibbles;
use ethrex_trie::{verify_range, Node};
use tokio::sync::{mpsc, Mutex};
-use tracing::warn;
+use tracing::{info, warn};

use crate::{
rlpx::{
@@ -92,8 +92,14 @@ impl PeerChannels {
return Some(block_headers)
}
// Ignore replies that don't match the expected id (such as late responses)
- Some(a) => {warn!("UNEXPECTED RESPONSE: {a:?}"); continue},
- None => {warn!("NO RESPONSE");return None},
+ Some(a) => {
+ warn!("UNEXPECTED RESPONSE: {a:?}");
+ continue;
+ }
+ None => {
+ warn!("NO RESPONSE");
+ return None;
+ }
}
}
})
@@ -211,6 +217,7 @@ impl PeerChannels {
})
.await
.ok()??;
+ info!("Peer returned accounts: {accounts:?}, proof: {proof:?}");
// Unzip & validate response
let proof = encodable_to_proof(&proof);
let (account_hashes, accounts): (Vec<_>, Vec<_>) = accounts

From 62db683859bec547600d6f12ea46ab66466ebc5d Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Mon, 13 Jan 2025 14:26:29 -0300
Subject: [PATCH 055/189] Debug

---
 crates/networking/p2p/rlpx/connection.rs | 1 -
 crates/networking/p2p/sync.rs | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs
index 7daf7e7015..90503ad6ab 100644
--- a/crates/networking/p2p/rlpx/connection.rs
+++ b/crates/networking/p2p/rlpx/connection.rs
@@ -266,7 +266,6 @@ impl RLPxConnection {
}
}

-
async fn handle_peer_conn(
&mut self,
sender: mpsc::Sender<Message>,

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 1528e04c59..408f5ac9ef 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -186,6 +186,7 @@ impl SyncManager {
let mut pivot_header = store
.get_block_header_by_hash(all_block_hashes[pivot_idx])?
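// (The pivot is the block MIN_FULL_BLOCKS before the sync head: its state is
// fetched via snap requests, while the blocks after it are executed in full.)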
.ok_or(SyncError::CorruptDB)?; + info!("Selected block {} as pivot for snap sync", pivot_header.number); let mut stale_pivot = !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone()) .await?; From 9008b1bc8bb660dc24499d36782b6db27a49d239 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 13 Jan 2025 15:25:21 -0300 Subject: [PATCH 056/189] Debug --- crates/networking/p2p/sync.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 408f5ac9ef..62dd306bb6 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -102,7 +102,8 @@ impl SyncManager { if let Some(last_header) = store.get_latest_downloaded_header()? { // Debug Code Block { - let last_downloaded_header = store.get_block_header_by_hash(last_header)?.unwrap().number; + let last_downloaded_header = + store.get_block_header_by_hash(last_header)?.unwrap().number; info!("Resuming header download from last downloaded header with number: {last_downloaded_header}"); } // We might have more headers than bodies downloaded so we should queue missing bodies for download @@ -140,7 +141,11 @@ impl SyncManager { .request_block_headers(current_head, BlockRequestOrder::OldToNew) .await { - info!("Received {} block headers| Last Number: {}", block_headers.len(), block_headers.last().as_ref().unwrap().number); + info!( + "Received {} block headers| Last Number: {}", + block_headers.len(), + block_headers.last().as_ref().unwrap().number + ); let mut block_hashes = block_headers .iter() .map(|header| header.compute_block_hash()) @@ -158,9 +163,12 @@ impl SyncManager { } // Store headers and save hashes for full block retrieval all_block_hashes.extend_from_slice(&block_hashes[..]); - store.add_block_headers(block_hashes, block_headers)?; + store.add_block_headers(block_hashes, block_headers.clone())?; if sync_head_found { + let sync_header = block_headers.iter().find(|h| h.compute_block_hash() == sync_head).unwrap(); + info!("Found sync head at block: {}", sync_header.number); + // No more headers to request break; } From de2f849d987375b6384bb3fcb28a917e41eced2a Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 13 Jan 2025 18:51:04 -0300 Subject: [PATCH 057/189] Debug --- crates/networking/p2p/peer_channels.rs | 7 ++++++- crates/networking/p2p/sync.rs | 2 ++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 1af4ef1c81..0754f45fc3 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -217,7 +217,9 @@ impl PeerChannels { }) .await .ok()??; - info!("Peer returned accounts: {accounts:?}, proof: {proof:?}"); + if accounts.is_empty() && proof.is_empty() { + info!("Peer returned empty account range"); + } // Unzip & validate response let proof = encodable_to_proof(&proof); let (account_hashes, accounts): (Vec<_>, Vec<_>) = accounts @@ -313,6 +315,9 @@ impl PeerChannels { }) .await .ok()??; + if slots.is_empty() && proof.is_empty() { + info!("Peer returned empty storage ranges"); + } // Check we got a reasonable amount of storage ranges if slots.len() > storage_roots.len() || slots.is_empty() { return None; diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 62dd306bb6..f2e2d008e2 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -463,6 +463,7 @@ async fn bytecode_fetcher( match receiver.recv().await { 
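// (Each received message extends the pending batch; an empty batch or a
// closed channel is the signal that no more requests are coming.)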
Some(code_hashes) if !code_hashes.is_empty() => { pending_bytecodes.extend(code_hashes); + info!("Received incoming bytecode request, current batch: {}/{BATCH_SIZE}", pending_bytecodes.len()) } // Disconnect / Empty message signaling no more bytecodes to sync _ => incoming = false, @@ -519,6 +520,7 @@ async fn storage_fetcher( match receiver.recv().await { Some(account_hashes_and_roots) if !account_hashes_and_roots.is_empty() => { pending_storage.extend(account_hashes_and_roots); + info!("Received incoming storage range request, current batch: {}/{BATCH_SIZE}", pending_storage.len()) } // Disconnect / Empty message signaling no more bytecodes to sync _ => incoming = false, From d092b1210e3dbe7002bf2d04b71e17e3d7978141 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 10:21:41 -0300 Subject: [PATCH 058/189] Mute connection errors --- crates/networking/p2p/rlpx/connection.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 90503ad6ab..a9d5fbdd3b 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -189,13 +189,13 @@ impl RLPxConnection { reason: self.match_disconnect_reason(&error), })) .await - .unwrap_or_else(|e| error!("Could not send Disconnect message: ({e}).")); + .unwrap_or_else(|e| debug!("Could not send Disconnect message: ({e}).")); if let Ok(node_id) = self.get_remote_node_id() { // Discard peer from kademlia table error!("{error_text}: ({error}), discarding peer {node_id}"); table.lock().await.replace_peer(node_id); } else { - error!("{error_text}: ({error}), unknown peer") + debug!("{error_text}: ({error}), unknown peer") } } From f4cd209968cf7ce116d1b56831b8d61b14dd71aa Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 10:44:43 -0300 Subject: [PATCH 059/189] Use current SyncStatus when applying a fork choice update --- crates/networking/rpc/engine/fork_choice.rs | 21 ++++++++++------ crates/networking/rpc/rpc.rs | 28 ++++++++++++++++++++- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/crates/networking/rpc/engine/fork_choice.rs b/crates/networking/rpc/engine/fork_choice.rs index 42e1393c72..a223f12975 100644 --- a/crates/networking/rpc/engine/fork_choice.rs +++ b/crates/networking/rpc/engine/fork_choice.rs @@ -14,7 +14,7 @@ use crate::{ payload::PayloadStatus, }, utils::RpcRequest, - RpcApiContext, RpcErr, RpcHandler, + RpcApiContext, RpcErr, RpcHandler, SyncStatus, }; #[derive(Debug)] @@ -153,13 +153,20 @@ fn handle_forkchoice( fork_choice_state.safe_block_hash, fork_choice_state.finalized_block_hash ); + // Check if there is an ongoing sync before applying the forkchoice + let fork_choice_res = match context.sync_status()? 
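+ // (Decision: Inactive applies the forkchoice update below, while Active or
+ // Pending answers with InvalidForkChoice::Syncing so the caller retries later.)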
{
+ // Apply current fork choice
+ SyncStatus::Inactive => apply_fork_choice(
+ &context.storage,
+ fork_choice_state.head_block_hash,
+ fork_choice_state.safe_block_hash,
+ fork_choice_state.finalized_block_hash,
+ ),
+ // Restart sync if needed
+ _ => Err(InvalidForkChoice::Syncing),
+ };

- match apply_fork_choice(
- &context.storage,
- fork_choice_state.head_block_hash,
- fork_choice_state.safe_block_hash,
- fork_choice_state.finalized_block_hash,
- ) {
+ match fork_choice_res {
Ok(head) => Ok((
Some(head),
ForkChoiceResponse::from(PayloadStatus::valid_with_hash(

diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs
index 336d0d896f..d194934b3e 100644
--- a/crates/networking/rpc/rpc.rs
+++ b/crates/networking/rpc/rpc.rs
@@ -62,7 +62,7 @@ mod web3;

use axum::extract::State;
use ethrex_net::types::Node;
-use ethrex_storage::Store;
+use ethrex_storage::{error::StoreError, Store};

#[derive(Debug, Clone)]
pub struct RpcApiContext {
@@ -73,6 +73,32 @@ pub struct RpcApiContext {
syncer: Arc<Mutex<SyncManager>>,
}

+/// Describes the client's current sync status:
+/// Inactive: There is no active sync process
+/// Active: The client is currently syncing
+/// Pending: The previous sync process became stale, awaiting restart
+pub enum SyncStatus {
+ Inactive,
+ Active,
+ Pending,
+}
+
+impl RpcApiContext {
+ /// Returns the engine's current sync status, see [SyncStatus]
+ pub fn sync_status(&self) -> Result<SyncStatus, StoreError> {
+ // Try to get hold of the sync manager, if we can't then it means it is currently involved in a sync process
+ Ok(if self.syncer.try_lock().is_err() {
+ SyncStatus::Active
+ // Check if there is a checkpoint left from a previous aborted sync
+ } else if self.storage.get_latest_downloaded_header()?.is_some() {
+ SyncStatus::Pending
+ // No trace of a sync being handled
+ } else {
+ SyncStatus::Inactive
+ })
+ }
+}
+
trait RpcHandler: Sized {
fn parse(params: &Option<Vec<Value>>) -> Result<Self, RpcErr>;

From 40e4cc540679e116c6f21cf5094a9936e8ab69fb Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Tue, 14 Jan 2025 11:36:55 -0300
Subject: [PATCH 060/189] Use current SyncStatus when applying a new payload

---
 crates/networking/p2p/sync.rs | 5 ++++-
 crates/networking/rpc/engine/payload.rs | 53 +++++++++++++++----------
 2 files changed, 36 insertions(+), 22 deletions(-)

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index f2e2d008e2..29dfe2ef67 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -194,7 +194,10 @@ impl SyncManager {
let mut pivot_header = store
.get_block_header_by_hash(all_block_hashes[pivot_idx])?
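// (The pivot header was stored during the header download phase, so a miss
// here can only mean a corrupted store.)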
.ok_or(SyncError::CorruptDB)?;
- info!("Selected block {} as pivot for snap sync", pivot_header.number);
+ info!(
+ "Selected block {} as pivot for snap sync",
+ pivot_header.number
+ );
let mut stale_pivot =
!rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone())
.await?;

diff --git a/crates/networking/rpc/engine/payload.rs b/crates/networking/rpc/engine/payload.rs
index 92e0595810..0e4e28c7c7 100644
--- a/crates/networking/rpc/engine/payload.rs
+++ b/crates/networking/rpc/engine/payload.rs
@@ -4,11 +4,11 @@ use ethrex_blockchain::payload::build_payload;
use ethrex_core::types::{BlobsBundle, Block, Fork};
use ethrex_core::{H256, U256};
use serde_json::Value;
-use tracing::{debug, error, info, warn};
+use tracing::{debug, error, warn};

use crate::types::payload::{ExecutionPayload, ExecutionPayloadResponse, PayloadStatus};
use crate::utils::RpcRequest;
-use crate::{RpcApiContext, RpcErr, RpcHandler};
+use crate::{RpcApiContext, RpcErr, RpcHandler, SyncStatus};

// NewPayload V1-V2-V3 implementations
pub struct NewPayloadV1Request {
@@ -92,20 +92,28 @@ impl RpcHandler for NewPayloadV3Request {
let block = get_block_from_payload(&self.payload, Some(self.parent_beacon_block_root))?;
validate_fork(&block, Fork::Cancun, &context)?;
let payload_status = {
- if let Err(RpcErr::Internal(error_msg)) = validate_block_hash(&self.payload, &block) {
- PayloadStatus::invalid_with_err(&error_msg)
- } else {
- let blob_versioned_hashes: Vec<H256> = block
- .body
- .transactions
- .iter()
- .flat_map(|tx| tx.blob_versioned_hashes())
- .collect();
-
- if self.expected_blob_versioned_hashes != blob_versioned_hashes {
- PayloadStatus::invalid_with_err("Invalid blob_versioned_hashes")
- } else {
- execute_payload(&block, &context)?
+ // Ignore incoming payloads while the node is syncing
+ match context.sync_status()? {
+ SyncStatus::Active | SyncStatus::Pending => PayloadStatus::syncing(),
+ SyncStatus::Inactive => {
+ if let Err(RpcErr::Internal(error_msg)) =
+ validate_block_hash(&self.payload, &block)
+ {
+ PayloadStatus::invalid_with_err(&error_msg)
+ } else {
+ let blob_versioned_hashes: Vec<H256> = block
+ .body
+ .transactions
+ .iter()
+ .flat_map(|tx| tx.blob_versioned_hashes())
+ .collect();
+
+ if self.expected_blob_versioned_hashes != blob_versioned_hashes {
+ PayloadStatus::invalid_with_err("Invalid blob_versioned_hashes")
+ } else {
+ execute_payload(&block, &context)?
+ }
+ }
}
}
};
@@ -195,11 +203,14 @@ fn handle_new_payload_v1_v2(
) -> Result<Value, RpcErr> {
let block = get_block_from_payload(payload, None)?;
validate_fork(&block, fork, &context)?;
- let payload_status = {
- if let Err(RpcErr::Internal(error_msg)) = validate_block_hash(payload, &block) {
- PayloadStatus::invalid_with_err(&error_msg)
- } else {
- execute_payload(&block, &context)?
+ let payload_status = match context.sync_status()? {
+ SyncStatus::Active | SyncStatus::Pending => PayloadStatus::syncing(),
+ SyncStatus::Inactive => {
+ if let Err(RpcErr::Internal(error_msg)) = validate_block_hash(payload, &block) {
+ PayloadStatus::invalid_with_err(&error_msg)
+ } else {
+ execute_payload(&block, &context)?
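+ // (As in the V3 handler above, payloads that arrive while a sync is active
+ // or pending are answered with a SYNCING status instead of being executed.)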
+ } } }; serde_json::to_value(payload_status).map_err(|error| RpcErr::Internal(error.to_string())) From 25785f3a6b52d9c158c218fb638010c1ff309412 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 11:49:41 -0300 Subject: [PATCH 061/189] Debug --- crates/networking/p2p/rlpx/connection.rs | 7 +++++-- crates/networking/p2p/sync.rs | 10 ++++++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index a9d5fbdd3b..87216c1281 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -650,8 +650,11 @@ impl RLPxConnection { if let RLPxConnectionState::Established(state) = &mut self.state { let mut frame_buffer = vec![]; message.encode(&mut frame_buffer)?; - frame::write(frame_buffer, state, &mut self.stream).await?; - Ok(()) + let ret = frame::write(frame_buffer, state, &mut self.stream).await; + if ret.is_err() { + warn!("Failed to send message: {message:?}"); + } + ret } else { Err(RLPxError::InvalidState()) } diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 29dfe2ef67..f4903ce323 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -466,7 +466,10 @@ async fn bytecode_fetcher( match receiver.recv().await { Some(code_hashes) if !code_hashes.is_empty() => { pending_bytecodes.extend(code_hashes); - info!("Received incoming bytecode request, current batch: {}/{BATCH_SIZE}", pending_bytecodes.len()) + info!( + "Received incoming bytecode request, current batch: {}/{BATCH_SIZE}", + pending_bytecodes.len() + ) } // Disconnect / Empty message signaling no more bytecodes to sync _ => incoming = false, @@ -523,7 +526,10 @@ async fn storage_fetcher( match receiver.recv().await { Some(account_hashes_and_roots) if !account_hashes_and_roots.is_empty() => { pending_storage.extend(account_hashes_and_roots); - info!("Received incoming storage range request, current batch: {}/{BATCH_SIZE}", pending_storage.len()) + info!( + "Received incoming storage range request, current batch: {}/{BATCH_SIZE}", + pending_storage.len() + ) } // Disconnect / Empty message signaling no more bytecodes to sync _ => incoming = false, From eb8cce565bd0351765a5670944df78e11fd980c6 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 11:52:37 -0300 Subject: [PATCH 062/189] Debug --- crates/networking/p2p/sync.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index f4903ce323..85699c704a 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -166,7 +166,10 @@ impl SyncManager { store.add_block_headers(block_hashes, block_headers.clone())?; if sync_head_found { - let sync_header = block_headers.iter().find(|h| h.compute_block_hash() == sync_head).unwrap(); + let sync_header = block_headers + .iter() + .find(|h| h.compute_block_hash() == sync_head) + .unwrap(); info!("Found sync head at block: {}", sync_header.number); // No more headers to request @@ -515,7 +518,7 @@ async fn storage_fetcher( store: Store, state_root: H256, ) -> Result<(), StoreError> { - const BATCH_SIZE: usize = 100; + const BATCH_SIZE: usize = 50; // Pending list of storages to fetch let mut pending_storage: Vec<(H256, H256)> = vec![]; // TODO: Also add a queue for storages that were incompletely fecthed, @@ -556,6 +559,11 @@ async fn fetch_storage_batch( peers: Arc>, store: Store, ) -> Result, StoreError> { + info!( + 
"Requesting storage ranges for addresses {}..{}", + batch.first().unwrap().0, + batch.last().unwrap().0 + ); for _ in 0..MAX_RETRIES { let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; let (batch_hahses, batch_roots) = batch.clone().into_iter().unzip(); From 84f5de683d26ec7a9075973a5ea8dd0ae382c163 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 11:59:11 -0300 Subject: [PATCH 063/189] Debug --- crates/networking/p2p/rlpx/connection.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 87216c1281..132e4c1ea9 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -652,7 +652,8 @@ impl RLPxConnection { message.encode(&mut frame_buffer)?; let ret = frame::write(frame_buffer, state, &mut self.stream).await; if ret.is_err() { - warn!("Failed to send message: {message:?}"); + let node_id = self.get_remote_node_id()?; + warn!("Failed to send message: {message:?}, to: {node_id}"); } ret } else { From b3074bb3331d0f0fb2ac4443b1268dbc9e98d685 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 12:10:57 -0300 Subject: [PATCH 064/189] Debug --- crates/networking/p2p/sync.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 85699c704a..15f0cffc73 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -392,6 +392,7 @@ async fn rebuild_state_trie( .request_account_range(state_root, start_account_hash) .await { + info!("Received {} account ranges", accounts.len()); // Update starting hash for next batch if should_continue { start_account_hash = *account_hashes.last().unwrap(); @@ -438,6 +439,7 @@ async fn rebuild_state_trie( } } } + info!("Account Trie fully fetched, signaling storage fetcher process"); // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; storage_fetcher_handle.await??; @@ -445,11 +447,13 @@ async fn rebuild_state_trie( info!("Completed state sync for state root {state_root}"); true } else { + info!("Oh no! Trie needs healing"); // Perform state healing to fix any potential inconsistency in the rebuilt tries // As we are not fetching different chunks of the same trie this step is not necessary heal_state_trie(bytecode_sender.clone(), state_root, store, peers).await? 
}; // Send empty batch to signal that no more batches are incoming + info!("Account Trie fully rebuilt, signaling bytecode fetcher process"); bytecode_sender.send(vec![]).await?; bytecode_fetcher_handle.await??; Ok(sync_complete) @@ -475,7 +479,7 @@ async fn bytecode_fetcher( ) } // Disconnect / Empty message signaling no more bytecodes to sync - _ => incoming = false, + _ => {info!("Final bytecode batch"); incoming = false}, } // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process @@ -535,7 +539,7 @@ async fn storage_fetcher( ) } // Disconnect / Empty message signaling no more bytecodes to sync - _ => incoming = false, + _ => {info!("Final storage batch"); incoming = false} } // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process From d55d22f62d4b5b48605ab5af886d2029cbc17763 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 12:24:20 -0300 Subject: [PATCH 065/189] Debug --- crates/networking/p2p/sync.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 15f0cffc73..58443ec543 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -443,14 +443,17 @@ async fn rebuild_state_trie( // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; storage_fetcher_handle.await??; + info!("Current State Root: {current_state_root} vs Expected Root: {state_root}"); let sync_complete = if current_state_root == state_root { info!("Completed state sync for state root {state_root}"); true } else { info!("Oh no! Trie needs healing"); + info!("Skipping state healing"); + true // Perform state healing to fix any potential inconsistency in the rebuilt tries // As we are not fetching different chunks of the same trie this step is not necessary - heal_state_trie(bytecode_sender.clone(), state_root, store, peers).await? + //heal_state_trie(bytecode_sender.clone(), state_root, store, peers).await? 
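+ // (NOTE: with healing skipped, sync_complete is forced to true even when the
+ // rebuilt root differs from the expected state root.)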
}; // Send empty batch to signal that no more batches are incoming info!("Account Trie fully rebuilt, signaling bytecode fetcher process"); @@ -479,7 +482,10 @@ async fn bytecode_fetcher( ) } // Disconnect / Empty message signaling no more bytecodes to sync - _ => {info!("Final bytecode batch"); incoming = false}, + _ => { + info!("Final bytecode batch"); + incoming = false + } } // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process @@ -539,7 +545,10 @@ async fn storage_fetcher( ) } // Disconnect / Empty message signaling no more bytecodes to sync - _ => {info!("Final storage batch"); incoming = false} + _ => { + info!("Final storage batch"); + incoming = false + } } // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process From ce488ba3b9cc4acfd7ee6ce6951def517d33f04f Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 13:44:40 -0300 Subject: [PATCH 066/189] Debug --- crates/networking/p2p/peer_channels.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 0754f45fc3..1cf1f2fde5 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -315,9 +315,7 @@ impl PeerChannels { }) .await .ok()??; - if slots.is_empty() && proof.is_empty() { - info!("Peer returned empty storage ranges"); - } + info!("Peer returned {} storage ranges", slots.len()); // Check we got a reasonable amount of storage ranges if slots.len() > storage_roots.len() || slots.is_empty() { return None; From 90571fe460d533b836a1d6ed527ff590ab279dde Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 13:46:47 -0300 Subject: [PATCH 067/189] [REVERT ME] disable receipt fetching --- crates/networking/p2p/sync.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 58443ec543..7e283e5ae6 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -226,11 +226,11 @@ impl SyncManager { store_bodies_handle.await??; // For all blocks before the pivot: Store the bodies and fetch the receipts // For all blocks after the pivot: Process them fully - let store_receipts_handle = tokio::spawn(store_receipts( - all_block_hashes[pivot_idx..].to_vec(), - self.peers.clone(), - store.clone(), - )); + // let store_receipts_handle = tokio::spawn(store_receipts( + // all_block_hashes[pivot_idx..].to_vec(), + // self.peers.clone(), + // store.clone(), + // )); for hash in all_block_hashes.into_iter() { let block = store.get_block_by_hash(hash)?.ok_or(SyncError::CorruptDB)?; if block.header.number <= pivot_header.number { @@ -242,7 +242,7 @@ impl SyncManager { ethrex_blockchain::add_block(&block, &store)?; } } - store_receipts_handle.await??; + //store_receipts_handle.await??; self.last_snap_pivot = pivot_header.number; } SyncMode::Full => { From a7b195fda2764e1a5fcad878aeaed8cb2d04b796 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 13:54:27 -0300 Subject: [PATCH 068/189] Execute blocks after pivot --- crates/networking/p2p/sync.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 7e283e5ae6..7e5cc7fbed 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -231,16 +231,14 @@ impl SyncManager { // 
self.peers.clone(), // store.clone(), // )); - for hash in all_block_hashes.into_iter() { - let block = store.get_block_by_hash(hash)?.ok_or(SyncError::CorruptDB)?; - if block.header.number <= pivot_header.number { - store.set_canonical_block(block.header.number, hash)?; - store.add_block(block)?; - } else { - store.set_canonical_block(block.header.number, hash)?; - store.update_latest_block_number(block.header.number)?; - ethrex_blockchain::add_block(&block, &store)?; - } + // Execute blocks after pivot + for hash in &all_block_hashes[pivot_idx..] { + let block = store + .get_block_by_hash(*hash)? + .ok_or(SyncError::CorruptDB)?; + store.set_canonical_block(block.header.number, *hash)?; + store.update_latest_block_number(block.header.number)?; + ethrex_blockchain::add_block(&block, &store)?; } //store_receipts_handle.await??; self.last_snap_pivot = pivot_header.number; From a53cfaa8ba1b20ba99b922e43a8e5120da13fd8c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 14:21:09 -0300 Subject: [PATCH 069/189] Trace verify range --- crates/storage/trie/verify_range.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index 8365354331..6df77a01d8 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -2,6 +2,7 @@ use std::{cmp::Ordering, collections::HashMap}; use ethereum_types::H256; use sha3::{Digest, Keccak256}; +use tracing::warn; use crate::{ nibbles::Nibbles, node::Node, node_hash::NodeHash, state::TrieState, Trie, TrieError, ValueRLP, @@ -29,6 +30,7 @@ pub fn verify_range( // Check that the key range is monotonically increasing for keys in keys.windows(2) { if keys[0] >= keys[1] { + warn!("key range is not monotonically increasing"); return Err(TrieError::Verify(String::from( "key range is not monotonically increasing", ))); @@ -36,6 +38,7 @@ pub fn verify_range( } // Check for empty values if values.iter().any(|value| value.is_empty()) { + warn!("value range contains empty value"); return Err(TrieError::Verify(String::from( "value range contains empty value", ))); @@ -52,6 +55,9 @@ pub fn verify_range( } let hash = trie.hash()?; if hash != root { + warn!( + "[Special Case: Empty Proof] invalid proof, expected root hash {root}, got {hash}" + ); return Err(TrieError::Verify(format!( "invalid proof, expected root hash {}, got {}", root, hash @@ -67,6 +73,7 @@ pub fn verify_range( let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; let has_right_element = has_right_element(root, first_key.as_bytes(), &trie.state)?; if has_right_element || !value.is_empty() { + warn!("[Special Case: No keys, one edge proof] no keys returned but more are available on the trie"); return Err(TrieError::Verify( "no keys returned but more are available on the trie".to_string(), )); @@ -82,11 +89,13 @@ pub fn verify_range( // We need to check that the proof confirms the existance of the first key let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; if first_key != &keys[0] { + warn!("[Special Case: One elem 2 proof] correct proof but invalid key"); return Err(TrieError::Verify( "correct proof but invalid key".to_string(), )); } if value != values[0] { + warn!("[Special Case: One elem 2 proof] correct proof but invalid data"); return Err(TrieError::Verify( "correct proof but invalid data".to_string(), )); @@ -96,6 +105,7 @@ pub fn verify_range( // Regular Case: Two edge proofs if first_key >= last_key { + warn!("[Regular Case] invalid edge 
keys"); return Err(TrieError::Verify("invalid edge keys".to_string())); } // Fill up the state with the nodes from the proof @@ -115,6 +125,7 @@ pub fn verify_range( // Check that the hash is the one we expected (aka the trie was properly reconstructed from the edge proofs and the range) let hash = trie.hash()?; if hash != root { + warn!("[Regular Case] invalid proof, expected root hash {root}, got {hash}"); return Err(TrieError::Verify(format!( "invalid proof, expected root hash {}, got {}", root, hash From 81fd7f0f0c045337b1127ac9b192d70d8c7955a5 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 14:23:49 -0300 Subject: [PATCH 070/189] Revert "Trace verify range" This reverts commit a53cfaa8ba1b20ba99b922e43a8e5120da13fd8c. --- crates/storage/trie/verify_range.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index 6df77a01d8..8365354331 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -2,7 +2,6 @@ use std::{cmp::Ordering, collections::HashMap}; use ethereum_types::H256; use sha3::{Digest, Keccak256}; -use tracing::warn; use crate::{ nibbles::Nibbles, node::Node, node_hash::NodeHash, state::TrieState, Trie, TrieError, ValueRLP, @@ -30,7 +29,6 @@ pub fn verify_range( // Check that the key range is monotonically increasing for keys in keys.windows(2) { if keys[0] >= keys[1] { - warn!("key range is not monotonically increasing"); return Err(TrieError::Verify(String::from( "key range is not monotonically increasing", ))); @@ -38,7 +36,6 @@ pub fn verify_range( } // Check for empty values if values.iter().any(|value| value.is_empty()) { - warn!("value range contains empty value"); return Err(TrieError::Verify(String::from( "value range contains empty value", ))); @@ -55,9 +52,6 @@ pub fn verify_range( } let hash = trie.hash()?; if hash != root { - warn!( - "[Special Case: Empty Proof] invalid proof, expected root hash {root}, got {hash}" - ); return Err(TrieError::Verify(format!( "invalid proof, expected root hash {}, got {}", root, hash @@ -73,7 +67,6 @@ pub fn verify_range( let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; let has_right_element = has_right_element(root, first_key.as_bytes(), &trie.state)?; if has_right_element || !value.is_empty() { - warn!("[Special Case: No keys, one edge proof] no keys returned but more are available on the trie"); return Err(TrieError::Verify( "no keys returned but more are available on the trie".to_string(), )); @@ -89,13 +82,11 @@ pub fn verify_range( // We need to check that the proof confirms the existance of the first key let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; if first_key != &keys[0] { - warn!("[Special Case: One elem 2 proof] correct proof but invalid key"); return Err(TrieError::Verify( "correct proof but invalid key".to_string(), )); } if value != values[0] { - warn!("[Special Case: One elem 2 proof] correct proof but invalid data"); return Err(TrieError::Verify( "correct proof but invalid data".to_string(), )); @@ -105,7 +96,6 @@ pub fn verify_range( // Regular Case: Two edge proofs if first_key >= last_key { - warn!("[Regular Case] invalid edge keys"); return Err(TrieError::Verify("invalid edge keys".to_string())); } // Fill up the state with the nodes from the proof @@ -125,7 +115,6 @@ pub fn verify_range( // Check that the hash is the one we expected (aka the trie was properly reconstructed from the edge proofs and the range) let hash = 
trie.hash()?; if hash != root { - warn!("[Regular Case] invalid proof, expected root hash {root}, got {hash}"); return Err(TrieError::Verify(format!( "invalid proof, expected root hash {}, got {}", root, hash From f3ac721acd3d4aa74d52eee858bb4ee2149558de Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 14:34:55 -0300 Subject: [PATCH 071/189] Trace verify range --- crates/networking/p2p/peer_channels.rs | 4 ++++ crates/storage/trie/verify_range.rs | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 1cf1f2fde5..cc77aebff0 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -326,7 +326,9 @@ impl PeerChannels { let mut storage_values = vec![]; let mut should_continue = false; // Validate each storage range + let total_slots = slots.len(); while !slots.is_empty() { + info!("Verifying slot {}/{}", total_slots - slots.len(), total_slots); let (hahsed_keys, values): (Vec<_>, Vec<_>) = slots .remove(0) .into_iter() @@ -334,6 +336,7 @@ impl PeerChannels { .unzip(); // We won't accept empty storage ranges if hahsed_keys.is_empty() { + info!("Empty Slot"); return None; } let encoded_values = values @@ -348,6 +351,7 @@ impl PeerChannels { // - The range is not the full storage (last range): We expect 2 edge proofs if hahsed_keys.len() == 1 && hahsed_keys[0] == start { if proof.is_empty() { + info!("One element with no proof"); return None; }; let first_proof = vec![proof.remove(0)]; diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index 8365354331..89113c7c60 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -2,6 +2,7 @@ use std::{cmp::Ordering, collections::HashMap}; use ethereum_types::H256; use sha3::{Digest, Keccak256}; +use tracing::warn; use crate::{ nibbles::Nibbles, node::Node, node_hash::NodeHash, state::TrieState, Trie, TrieError, ValueRLP, @@ -15,6 +16,19 @@ pub fn verify_range( keys: &[H256], values: &[ValueRLP], proof: &[Vec], +) -> Result { + let res = verify_range_i(root, first_key, keys, values, proof); + if let Err(ref e) = res { + warn!("Verify Range failed due to : {e:?}") + } + res +} +pub fn verify_range_i( + root: H256, + first_key: &H256, + keys: &[H256], + values: &[ValueRLP], + proof: &[Vec], ) -> Result { // Store proof nodes by hash let proof_nodes = ProofNodeStorage::from_proof(proof); From 1d6548e92f4f3318d6f867c0adc19c8b7d72a066 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 14:41:52 -0300 Subject: [PATCH 072/189] Trace verify range --- crates/networking/p2p/peer_channels.rs | 1 + crates/storage/trie/verify_range.rs | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index cc77aebff0..37c4028b27 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -344,6 +344,7 @@ impl PeerChannels { .map(|val| val.encode_to_vec()) .collect::>(); let storage_root = storage_roots.remove(0); + info!("Storage root: {storage_root}"); // We have 3 cases (as we won't accept empty storage ranges): // - The range has only 1 element (with key matching the start): We expect one edge proof diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index 89113c7c60..70a823eefe 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -2,7 
+2,7 @@ use std::{cmp::Ordering, collections::HashMap}; use ethereum_types::H256; use sha3::{Digest, Keccak256}; -use tracing::warn; +use tracing::{info, warn}; use crate::{ nibbles::Nibbles, node::Node, node_hash::NodeHash, state::TrieState, Trie, TrieError, ValueRLP, @@ -60,6 +60,7 @@ pub fn verify_range_i( // Special Case: No proofs given, the range is expected to be the full set of leaves if proof.is_empty() { + info!("Special Case: No Proof"); // Check that the trie constructed from the given keys and values has the expected root for (key, value) in keys.iter().zip(values.iter()) { trie.insert(key.0.to_vec(), value.clone())?; @@ -76,6 +77,7 @@ pub fn verify_range_i( // Special Case: One edge proof, no range given, there are no more values in the trie if keys.is_empty() { + info!("Special Case: No Range One Edge Proof"); // We need to check that the proof confirms the non-existance of the first key // and that there are no more elements to the right of the first key let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; @@ -93,6 +95,7 @@ pub fn verify_range_i( // Special Case: There is only one element and the two edge keys are the same if keys.len() == 1 && first_key == last_key { + info!("Special Case: One elem 2 proof"); // We need to check that the proof confirms the existance of the first key let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; if first_key != &keys[0] { @@ -109,6 +112,7 @@ pub fn verify_range_i( } // Regular Case: Two edge proofs + info!("Regular Case: 2 proof"); if first_key >= last_key { return Err(TrieError::Verify("invalid edge keys".to_string())); } From a8aba1224911c9a4e3a994685f2550c08293dfbd Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 14:51:35 -0300 Subject: [PATCH 073/189] Trace verify range --- crates/networking/p2p/peer_channels.rs | 10 +- crates/storage/trie/trie.rs | 2 +- crates/storage/trie/verify_range.rs | 126 ++++++++++++++++++++++++- 3 files changed, 131 insertions(+), 7 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 37c4028b27..772960f064 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -6,8 +6,8 @@ use ethrex_core::{ H256, U256, }; use ethrex_rlp::encode::RLPEncode; -use ethrex_trie::Nibbles; use ethrex_trie::{verify_range, Node}; +use ethrex_trie::{verify_range_ex, Nibbles}; use tokio::sync::{mpsc, Mutex}; use tracing::{info, warn}; @@ -328,7 +328,11 @@ impl PeerChannels { // Validate each storage range let total_slots = slots.len(); while !slots.is_empty() { - info!("Verifying slot {}/{}", total_slots - slots.len(), total_slots); + info!( + "Verifying slot {}/{}", + total_slots - slots.len(), + total_slots + ); let (hahsed_keys, values): (Vec<_>, Vec<_>) = slots .remove(0) .into_iter() @@ -368,7 +372,7 @@ impl PeerChannels { // Last element with two edge proofs if slots.is_empty() && proof.len() >= 2 { let last_proof = vec![proof.remove(0), proof.remove(0)]; - should_continue = verify_range( + should_continue = verify_range_ex( storage_root, &start, &hahsed_keys, diff --git a/crates/storage/trie/trie.rs b/crates/storage/trie/trie.rs index ee61b639a6..4dcfa2f559 100644 --- a/crates/storage/trie/trie.rs +++ b/crates/storage/trie/trie.rs @@ -20,7 +20,7 @@ pub use self::db::{libmdbx::LibmdbxTrieDB, libmdbx_dupsort::LibmdbxDupsortTrieDB pub use self::db::{in_memory::InMemoryTrieDB, TrieDB}; pub use self::nibbles::Nibbles; -pub use self::verify_range::verify_range; +pub use 
self::verify_range::{verify_range, verify_range_ex}; pub use self::{node::Node, state::TrieState}; pub use self::error::TrieError; diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index 70a823eefe..f98a3c8712 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -10,20 +10,20 @@ use crate::{ /// Verifies that the key value range belongs to the trie with the given root given the edge proofs for the range /// Also returns true if there is more state to be fetched (aka if there are more keys to the right of the given range) -pub fn verify_range( +pub fn verify_range_ex( root: H256, first_key: &H256, keys: &[H256], values: &[ValueRLP], proof: &[Vec], ) -> Result { - let res = verify_range_i(root, first_key, keys, values, proof); + let res = verify_range_ex_i(root, first_key, keys, values, proof); if let Err(ref e) = res { warn!("Verify Range failed due to : {e:?}") } res } -pub fn verify_range_i( +pub fn verify_range_ex_i( root: H256, first_key: &H256, keys: &[H256], @@ -116,6 +116,126 @@ pub fn verify_range_i( if first_key >= last_key { return Err(TrieError::Verify("invalid edge keys".to_string())); } + // Show nodes + for node in proof { + let hash = H256::from_slice(&Keccak256::new_with_prefix(node).finalize().to_vec()); + let node = Node::decode_raw(node).unwrap(); + info!("Node: {hash}: {node:?}") + } + // Fill up the state with the nodes from the proof + fill_state(&mut trie.state, root, first_key, &proof_nodes)?; + fill_state(&mut trie.state, root, last_key, &proof_nodes)?; + // Remove all references to the internal nodes that belong to the range so they can be reconstructed + let empty = remove_internal_references(root, first_key, last_key, &mut trie.state)?; + if !empty { + trie.root = Some(NodeHash::from(root)); + } + // Reconstruct the internal nodes by inserting the elements on the range + for (key, value) in keys.iter().zip(values.iter()) { + trie.insert(key.0.to_vec(), value.clone())?; + } + // Check for elements to the right of the range before we wipe the sate + let has_right_element = has_right_element(root, last_key.as_bytes(), &trie.state)?; + // Check that the hash is the one we expected (aka the trie was properly reconstructed from the edge proofs and the range) + let hash = trie.hash()?; + if hash != root { + return Err(TrieError::Verify(format!( + "invalid proof, expected root hash {}, got {}", + root, hash + ))); + } + Ok(has_right_element) +} + +pub fn verify_range( + root: H256, + first_key: &H256, + keys: &[H256], + values: &[ValueRLP], + proof: &[Vec], +) -> Result { + // Store proof nodes by hash + let proof_nodes = ProofNodeStorage::from_proof(proof); + // Validate range + if keys.len() != values.len() { + return Err(TrieError::Verify(format!( + "inconsistent proof data, got {} keys and {} values", + keys.len(), + values.len() + ))); + } + // Check that the key range is monotonically increasing + for keys in keys.windows(2) { + if keys[0] >= keys[1] { + return Err(TrieError::Verify(String::from( + "key range is not monotonically increasing", + ))); + } + } + // Check for empty values + if values.iter().any(|value| value.is_empty()) { + return Err(TrieError::Verify(String::from( + "value range contains empty value", + ))); + } + + // Verify ranges depending on the given proof + let mut trie = Trie::stateless(); + + // Special Case: No proofs given, the range is expected to be the full set of leaves + if proof.is_empty() { + // Check that the trie constructed from the given keys and values 
has the expected root + for (key, value) in keys.iter().zip(values.iter()) { + trie.insert(key.0.to_vec(), value.clone())?; + } + let hash = trie.hash()?; + if hash != root { + return Err(TrieError::Verify(format!( + "invalid proof, expected root hash {}, got {}", + root, hash + ))); + } + return Ok(false); + } + + // Special Case: One edge proof, no range given, there are no more values in the trie + if keys.is_empty() { + // We need to check that the proof confirms the non-existance of the first key + // and that there are no more elements to the right of the first key + let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; + let has_right_element = has_right_element(root, first_key.as_bytes(), &trie.state)?; + if has_right_element || !value.is_empty() { + return Err(TrieError::Verify( + "no keys returned but more are available on the trie".to_string(), + )); + } else { + return Ok(false); + } + } + + let last_key = keys.last().unwrap(); + + // Special Case: There is only one element and the two edge keys are the same + if keys.len() == 1 && first_key == last_key { + // We need to check that the proof confirms the existance of the first key + let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; + if first_key != &keys[0] { + return Err(TrieError::Verify( + "correct proof but invalid key".to_string(), + )); + } + if value != values[0] { + return Err(TrieError::Verify( + "correct proof but invalid data".to_string(), + )); + } + return has_right_element(root, first_key.as_bytes(), &trie.state); + } + + // Regular Case: Two edge proofs + if first_key >= last_key { + return Err(TrieError::Verify("invalid edge keys".to_string())); + } // Fill up the state with the nodes from the proof fill_state(&mut trie.state, root, first_key, &proof_nodes)?; fill_state(&mut trie.state, root, last_key, &proof_nodes)?; From b5b4a241530e76fc0e9dd2201e06854be7f84815 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 14:59:07 -0300 Subject: [PATCH 074/189] Trace verify range --- crates/storage/trie/verify_range.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index f98a3c8712..e310edf26c 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -289,7 +289,9 @@ fn fill_node( proof_nodes: &ProofNodeStorage, ) -> Result, TrieError> { let node = proof_nodes.get_node(node_hash)?; + info!("Filling node: {node:?} on path {path:?}"); let child_hash = get_child(path, &node); + info!("got child: {child_hash:?}"); if let Some(ref child_hash) = child_hash { trie_state.insert_node(node, node_hash.clone()); fill_node(path, child_hash, trie_state, proof_nodes) From 024c13b21bdcbb5477987b1c1e0affd99fa16a07 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 15:04:42 -0300 Subject: [PATCH 075/189] Trace verify range --- crates/storage/trie/verify_range.rs | 48 +++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index e310edf26c..a7949f0699 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -123,8 +123,8 @@ pub fn verify_range_ex_i( info!("Node: {hash}: {node:?}") } // Fill up the state with the nodes from the proof - fill_state(&mut trie.state, root, first_key, &proof_nodes)?; - fill_state(&mut trie.state, root, last_key, &proof_nodes)?; + fill_state_ex(&mut trie.state, root, first_key, 
&proof_nodes)?; + fill_state_ex(&mut trie.state, root, last_key, &proof_nodes)?; // Remove all references to the internal nodes that belong to the range so they can be reconstructed let empty = remove_internal_references(root, first_key, last_key, &mut trie.state)?; if !empty { @@ -279,10 +279,25 @@ fn fill_state( ) } +fn fill_state_ex( + trie_state: &mut TrieState, + root_hash: H256, + first_key: &H256, + proof_nodes: &ProofNodeStorage, +) -> Result, TrieError> { + let mut path = Nibbles::from_bytes(&first_key.0); + fill_node_ex( + &mut path, + &NodeHash::from(root_hash), + trie_state, + proof_nodes, + ) +} + /// Fills up the TrieState with nodes from the proof traversing the path given by first_key /// Returns an error if there are gaps in the proof node path /// Also returns the value if it is part of the proof -fn fill_node( +fn fill_node_ex( path: &mut Nibbles, node_hash: &NodeHash, trie_state: &mut TrieState, @@ -308,6 +323,33 @@ fn fill_node( } } +/// Fills up the TrieState with nodes from the proof traversing the path given by first_key +/// Returns an error if there are gaps in the proof node path +/// Also returns the value if it is part of the proof +fn fill_node( + path: &mut Nibbles, + node_hash: &NodeHash, + trie_state: &mut TrieState, + proof_nodes: &ProofNodeStorage, +) -> Result, TrieError> { + let node = proof_nodes.get_node(node_hash)?; + let child_hash = get_child(path, &node); + if let Some(ref child_hash) = child_hash { + trie_state.insert_node(node, node_hash.clone()); + fill_node(path, child_hash, trie_state, proof_nodes) + } else { + let value = match &node { + Node::Branch(n) => n.value.clone(), + Node::Extension(_) => vec![], + Node::Leaf(n) => (*path == n.partial) + .then_some(n.value.clone()) + .unwrap_or_default(), + }; + trie_state.insert_node(node, node_hash.clone()); + Ok(value) + } +} + /// Returns the node hash of the node's child (if any) following the given path fn get_child<'a>(path: &'a mut Nibbles, node: &'a Node) -> Option { match node { From 3a77d933f585a89a9ebb8f901b67208ad8e420bd Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 15:16:24 -0300 Subject: [PATCH 076/189] Trace verify range --- crates/storage/trie/verify_range.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index a7949f0699..fec176cf11 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -303,8 +303,9 @@ fn fill_node_ex( trie_state: &mut TrieState, proof_nodes: &ProofNodeStorage, ) -> Result, TrieError> { + info!("Filling node: {node_hash:?} on path {path:?}"); let node = proof_nodes.get_node(node_hash)?; - info!("Filling node: {node:?} on path {path:?}"); + info!("got node: {node:?}"); let child_hash = get_child(path, &node); info!("got child: {child_hash:?}"); if let Some(ref child_hash) = child_hash { From bb973340a8bfa38b149705a18e69ce5ed90a0182 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 15:20:03 -0300 Subject: [PATCH 077/189] Trace verify range --- crates/storage/trie/verify_range.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index fec176cf11..315d9ba7dc 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -310,7 +310,7 @@ fn fill_node_ex( info!("got child: {child_hash:?}"); if let Some(ref child_hash) = child_hash { trie_state.insert_node(node, node_hash.clone()); - 
fill_node(path, child_hash, trie_state, proof_nodes) + fill_node_ex(path, child_hash, trie_state, proof_nodes) } else { let value = match &node { Node::Branch(n) => n.value.clone(), From f768099f1144d24458ded7ed7918dd9c6108c49d Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 15:24:33 -0300 Subject: [PATCH 078/189] Trace verify range --- crates/networking/p2p/peer_channels.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 772960f064..a79c9df11a 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -371,6 +371,7 @@ impl PeerChannels { } // Last element with two edge proofs if slots.is_empty() && proof.len() >= 2 { + info!("Remaining proofs for last element: {}", proof.len()); let last_proof = vec![proof.remove(0), proof.remove(0)]; should_continue = verify_range_ex( storage_root, From 26855e47f942239976ed70da385fcbe78ef1ddbd Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 15:34:37 -0300 Subject: [PATCH 079/189] Trace verify range --- crates/networking/p2p/peer_channels.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index a79c9df11a..05a515cedd 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -327,6 +327,7 @@ impl PeerChannels { let mut should_continue = false; // Validate each storage range let total_slots = slots.len(); + info!("proofs: {proof:?}"); while !slots.is_empty() { info!( "Verifying slot {}/{}", From d915aac3380c351d434891dff5406a8dc58c8ef0 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 15:36:55 -0300 Subject: [PATCH 080/189] Trace verify range --- crates/networking/p2p/peer_channels.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 05a515cedd..8838a547ca 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -327,7 +327,7 @@ impl PeerChannels { let mut should_continue = false; // Validate each storage range let total_slots = slots.len(); - info!("proofs: {proof:?}"); + info!("proof count: {}", proof.len()); while !slots.is_empty() { info!( "Verifying slot {}/{}", @@ -356,6 +356,7 @@ impl PeerChannels { // - The range has the full storage: We expect no proofs // - The range is not the full storage (last range): We expect 2 edge proofs if hahsed_keys.len() == 1 && hahsed_keys[0] == start { + info!(" 1 Elem - 1 Proof"); if proof.is_empty() { info!("One element with no proof"); return None; @@ -383,6 +384,7 @@ impl PeerChannels { ) .ok()?; } else { + info!(" Full Range - 0 Proof"); // Full range (no proofs) verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &[]).ok()?; } From 7512ad476c24075e7b4292d089be3e25b8592af4 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 15:49:51 -0300 Subject: [PATCH 081/189] Fix validate storage range logic --- crates/networking/p2p/peer_channels.rs | 44 ++++---------------------- 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 8838a547ca..b27367ccb5 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -6,8 +6,8 @@ use ethrex_core::{ H256, U256, }; use ethrex_rlp::encode::RLPEncode; +use ethrex_trie::Nibbles; use 
ethrex_trie::{verify_range, Node}; -use ethrex_trie::{verify_range_ex, Nibbles}; use tokio::sync::{mpsc, Mutex}; use tracing::{info, warn}; @@ -321,13 +321,12 @@ impl PeerChannels { return None; } // Unzip & validate response - let mut proof = encodable_to_proof(&proof); + let proof = encodable_to_proof(&proof); let mut storage_keys = vec![]; let mut storage_values = vec![]; let mut should_continue = false; // Validate each storage range let total_slots = slots.len(); - info!("proof count: {}", proof.len()); while !slots.is_empty() { info!( "Verifying slot {}/{}", @@ -351,41 +350,12 @@ impl PeerChannels { let storage_root = storage_roots.remove(0); info!("Storage root: {storage_root}"); - // We have 3 cases (as we won't accept empty storage ranges): - // - The range has only 1 element (with key matching the start): We expect one edge proof - // - The range has the full storage: We expect no proofs - // - The range is not the full storage (last range): We expect 2 edge proofs - if hahsed_keys.len() == 1 && hahsed_keys[0] == start { - info!(" 1 Elem - 1 Proof"); - if proof.is_empty() { - info!("One element with no proof"); - return None; - }; - let first_proof = vec![proof.remove(0)]; - verify_range( - storage_root, - &start, - &hahsed_keys, - &encoded_values, - &first_proof, - ) - .ok()?; - } - // Last element with two edge proofs - if slots.is_empty() && proof.len() >= 2 { - info!("Remaining proofs for last element: {}", proof.len()); - let last_proof = vec![proof.remove(0), proof.remove(0)]; - should_continue = verify_range_ex( - storage_root, - &start, - &hahsed_keys, - &encoded_values, - &last_proof, - ) - .ok()?; + // The proof corresponds to the last slot, for the previous ones the slot must be the full range without edge proofs + if slots.is_empty() { + should_continue = + verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &proof) + .ok()?; } else { - info!(" Full Range - 0 Proof"); - // Full range (no proofs) verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &[]).ok()?; } From 075ce35ad1f8bd875dd17fef1c3406b31764f696 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 16:05:30 -0300 Subject: [PATCH 082/189] Fix retry counter when fetching account ranges --- crates/networking/p2p/sync.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 9c9eb85ac4..0762054bb5 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -327,8 +327,8 @@ async fn rebuild_state_trie( // We cannot keep an open trie here so we will track the root between lookups let mut current_state_root = *EMPTY_TRIE_HASH; // Fetch Account Ranges - // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available - for _ in 0..MAX_RETRIES { + let mut retry_count = 0; + while retry_count <= MAX_RETRIES { let peer = peers .clone() .lock() @@ -340,6 +340,8 @@ async fn rebuild_state_trie( .request_account_range(state_root, start_account_hash) .await { + // Reset retry counter + retry_count = 0; // Update starting hash for next batch if should_continue { start_account_hash = *account_hashes.last().unwrap(); @@ -384,6 +386,8 @@ async fn rebuild_state_trie( // All accounts fetched! 
break; } + } else { + retry_count += 1; } } // Send empty batch to signal that no more batches are incoming From 94338ae87619743ceccf28bb42ce12af843c52ef Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 16:08:12 -0300 Subject: [PATCH 083/189] Only activate full sync after successful snap sync --- crates/networking/p2p/sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 0762054bb5..4657e5fdca 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -79,8 +79,6 @@ impl SyncManager { "Sync finished, time elapsed: {} secs", start_time.elapsed().as_secs() ); - // Next sync will be full-sync - self.sync_mode = SyncMode::Full; } Err(error) => warn!( "Sync failed due to {error}, time elapsed: {} secs ", @@ -200,6 +198,8 @@ impl SyncManager { } store_receipts_handle.await??; self.last_snap_pivot = pivot_header.number; + // Next sync will be full-sync + self.sync_mode = SyncMode::Full; } SyncMode::Full => { // full-sync: Fetch all block bodies and execute them sequentially to build the state From 231867f98ab95fa212ae436e017b684c80fdb4c2 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 16:12:27 -0300 Subject: [PATCH 084/189] Restore comment --- crates/networking/p2p/sync.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 4657e5fdca..5d7c8c2c9e 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -327,6 +327,7 @@ async fn rebuild_state_trie( // We cannot keep an open trie here so we will track the root between lookups let mut current_state_root = *EMPTY_TRIE_HASH; // Fetch Account Ranges + // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available let mut retry_count = 0; while retry_count <= MAX_RETRIES { let peer = peers From be6464aaac3b2eacd284343a8c0831d419cba5ea Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 16:24:10 -0300 Subject: [PATCH 085/189] Clean verify range tracing --- crates/storage/trie/verify_range.rs | 182 ---------------------------- 1 file changed, 182 deletions(-) diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index 315d9ba7dc..f6c29ca47c 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -10,143 +10,6 @@ use crate::{ /// Verifies that the key value range belongs to the trie with the given root given the edge proofs for the range /// Also returns true if there is more state to be fetched (aka if there are more keys to the right of the given range) -pub fn verify_range_ex( - root: H256, - first_key: &H256, - keys: &[H256], - values: &[ValueRLP], - proof: &[Vec], -) -> Result { - let res = verify_range_ex_i(root, first_key, keys, values, proof); - if let Err(ref e) = res { - warn!("Verify Range failed due to : {e:?}") - } - res -} -pub fn verify_range_ex_i( - root: H256, - first_key: &H256, - keys: &[H256], - values: &[ValueRLP], - proof: &[Vec], -) -> Result { - // Store proof nodes by hash - let proof_nodes = ProofNodeStorage::from_proof(proof); - // Validate range - if keys.len() != values.len() { - return Err(TrieError::Verify(format!( - "inconsistent proof data, got {} keys and {} values", - keys.len(), - values.len() - ))); - } - // Check that the key range is monotonically increasing - for keys in keys.windows(2) { - if keys[0] >= keys[1] { - return 
Err(TrieError::Verify(String::from( - "key range is not monotonically increasing", - ))); - } - } - // Check for empty values - if values.iter().any(|value| value.is_empty()) { - return Err(TrieError::Verify(String::from( - "value range contains empty value", - ))); - } - - // Verify ranges depending on the given proof - let mut trie = Trie::stateless(); - - // Special Case: No proofs given, the range is expected to be the full set of leaves - if proof.is_empty() { - info!("Special Case: No Proof"); - // Check that the trie constructed from the given keys and values has the expected root - for (key, value) in keys.iter().zip(values.iter()) { - trie.insert(key.0.to_vec(), value.clone())?; - } - let hash = trie.hash()?; - if hash != root { - return Err(TrieError::Verify(format!( - "invalid proof, expected root hash {}, got {}", - root, hash - ))); - } - return Ok(false); - } - - // Special Case: One edge proof, no range given, there are no more values in the trie - if keys.is_empty() { - info!("Special Case: No Range One Edge Proof"); - // We need to check that the proof confirms the non-existance of the first key - // and that there are no more elements to the right of the first key - let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; - let has_right_element = has_right_element(root, first_key.as_bytes(), &trie.state)?; - if has_right_element || !value.is_empty() { - return Err(TrieError::Verify( - "no keys returned but more are available on the trie".to_string(), - )); - } else { - return Ok(false); - } - } - - let last_key = keys.last().unwrap(); - - // Special Case: There is only one element and the two edge keys are the same - if keys.len() == 1 && first_key == last_key { - info!("Special Case: One elem 2 proof"); - // We need to check that the proof confirms the existance of the first key - let value = fill_state(&mut trie.state, root, first_key, &proof_nodes)?; - if first_key != &keys[0] { - return Err(TrieError::Verify( - "correct proof but invalid key".to_string(), - )); - } - if value != values[0] { - return Err(TrieError::Verify( - "correct proof but invalid data".to_string(), - )); - } - return has_right_element(root, first_key.as_bytes(), &trie.state); - } - - // Regular Case: Two edge proofs - info!("Regular Case: 2 proof"); - if first_key >= last_key { - return Err(TrieError::Verify("invalid edge keys".to_string())); - } - // Show nodes - for node in proof { - let hash = H256::from_slice(&Keccak256::new_with_prefix(node).finalize().to_vec()); - let node = Node::decode_raw(node).unwrap(); - info!("Node: {hash}: {node:?}") - } - // Fill up the state with the nodes from the proof - fill_state_ex(&mut trie.state, root, first_key, &proof_nodes)?; - fill_state_ex(&mut trie.state, root, last_key, &proof_nodes)?; - // Remove all references to the internal nodes that belong to the range so they can be reconstructed - let empty = remove_internal_references(root, first_key, last_key, &mut trie.state)?; - if !empty { - trie.root = Some(NodeHash::from(root)); - } - // Reconstruct the internal nodes by inserting the elements on the range - for (key, value) in keys.iter().zip(values.iter()) { - trie.insert(key.0.to_vec(), value.clone())?; - } - // Check for elements to the right of the range before we wipe the sate - let has_right_element = has_right_element(root, last_key.as_bytes(), &trie.state)?; - // Check that the hash is the one we expected (aka the trie was properly reconstructed from the edge proofs and the range) - let hash = trie.hash()?; - if hash != root { - 
return Err(TrieError::Verify(format!( - "invalid proof, expected root hash {}, got {}", - root, hash - ))); - } - Ok(has_right_element) -} - pub fn verify_range( root: H256, first_key: &H256, @@ -279,51 +142,6 @@ fn fill_state( ) } -fn fill_state_ex( - trie_state: &mut TrieState, - root_hash: H256, - first_key: &H256, - proof_nodes: &ProofNodeStorage, -) -> Result, TrieError> { - let mut path = Nibbles::from_bytes(&first_key.0); - fill_node_ex( - &mut path, - &NodeHash::from(root_hash), - trie_state, - proof_nodes, - ) -} - -/// Fills up the TrieState with nodes from the proof traversing the path given by first_key -/// Returns an error if there are gaps in the proof node path -/// Also returns the value if it is part of the proof -fn fill_node_ex( - path: &mut Nibbles, - node_hash: &NodeHash, - trie_state: &mut TrieState, - proof_nodes: &ProofNodeStorage, -) -> Result, TrieError> { - info!("Filling node: {node_hash:?} on path {path:?}"); - let node = proof_nodes.get_node(node_hash)?; - info!("got node: {node:?}"); - let child_hash = get_child(path, &node); - info!("got child: {child_hash:?}"); - if let Some(ref child_hash) = child_hash { - trie_state.insert_node(node, node_hash.clone()); - fill_node_ex(path, child_hash, trie_state, proof_nodes) - } else { - let value = match &node { - Node::Branch(n) => n.value.clone(), - Node::Extension(_) => vec![], - Node::Leaf(n) => (*path == n.partial) - .then_some(n.value.clone()) - .unwrap_or_default(), - }; - trie_state.insert_node(node, node_hash.clone()); - Ok(value) - } -} - /// Fills up the TrieState with nodes from the proof traversing the path given by first_key /// Returns an error if there are gaps in the proof node path /// Also returns the value if it is part of the proof From 132aecccdd7f067dc1541bf596b1455cd7ac537a Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 16:25:13 -0300 Subject: [PATCH 086/189] Clean verify range tracing --- crates/networking/p2p/peer_channels.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index b27367ccb5..733c603f9d 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -326,13 +326,7 @@ impl PeerChannels { let mut storage_values = vec![]; let mut should_continue = false; // Validate each storage range - let total_slots = slots.len(); while !slots.is_empty() { - info!( - "Verifying slot {}/{}", - total_slots - slots.len(), - total_slots - ); let (hahsed_keys, values): (Vec<_>, Vec<_>) = slots .remove(0) .into_iter() @@ -348,7 +342,6 @@ impl PeerChannels { .map(|val| val.encode_to_vec()) .collect::>(); let storage_root = storage_roots.remove(0); - info!("Storage root: {storage_root}"); // The proof corresponds to the last slot, for the previous ones the slot must be the full range without edge proofs if slots.is_empty() { From f6e75e6ebfd4e9f7847c8e7a9bd946077a376a6c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 16:26:13 -0300 Subject: [PATCH 087/189] Clean verify range tracing --- crates/networking/p2p/sync.rs | 2 +- crates/storage/trie/trie.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 2c3e87b2b6..4646a8672c 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -14,7 +14,7 @@ use tokio::{ }, time::Instant, }; -use tracing::{debug, info, warn}; +use tracing::{info, warn}; use crate::rlpx::p2p::Capability; use 
crate::{kademlia::KademliaTable, peer_channels::BlockRequestOrder}; diff --git a/crates/storage/trie/trie.rs b/crates/storage/trie/trie.rs index 4dcfa2f559..ee61b639a6 100644 --- a/crates/storage/trie/trie.rs +++ b/crates/storage/trie/trie.rs @@ -20,7 +20,7 @@ pub use self::db::{libmdbx::LibmdbxTrieDB, libmdbx_dupsort::LibmdbxDupsortTrieDB pub use self::db::{in_memory::InMemoryTrieDB, TrieDB}; pub use self::nibbles::Nibbles; -pub use self::verify_range::{verify_range, verify_range_ex}; +pub use self::verify_range::verify_range; pub use self::{node::Node, state::TrieState}; pub use self::error::TrieError; From b5e5c88a9364c7a038c8993e45440c71ea3c681e Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 17:15:34 -0300 Subject: [PATCH 088/189] Adjust numbers + silence mac failure --- crates/networking/p2p/rlpx/frame.rs | 4 ++-- crates/networking/p2p/rlpx/handshake.rs | 4 ++-- crates/networking/p2p/sync.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/networking/p2p/rlpx/frame.rs b/crates/networking/p2p/rlpx/frame.rs index 3f2771319f..83ce9e3cbf 100644 --- a/crates/networking/p2p/rlpx/frame.rs +++ b/crates/networking/p2p/rlpx/frame.rs @@ -6,7 +6,7 @@ use ethrex_core::H128; use ethrex_rlp::encode::RLPEncode as _; use sha3::Digest as _; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; -use tracing::warn; +use tracing::{debug, warn}; use super::{connection::Established, error::RLPxError}; @@ -127,7 +127,7 @@ pub(crate) async fn read( ); if header_mac != expected_header_mac.0 { - warn!("Mismatched mac"); + debug!("Mismatched mac"); } let header_text = header_ciphertext; diff --git a/crates/networking/p2p/rlpx/handshake.rs b/crates/networking/p2p/rlpx/handshake.rs index e90e17aa30..32fac50c17 100644 --- a/crates/networking/p2p/rlpx/handshake.rs +++ b/crates/networking/p2p/rlpx/handshake.rs @@ -13,7 +13,7 @@ use k256::{ PublicKey, SecretKey, }; use rand::Rng; -use tracing::warn; +use tracing::{debug, warn}; use super::error::RLPxError; @@ -118,7 +118,7 @@ fn decrypt_message( // Verify the MAC. let expected_d = sha256_hmac(&mac_key, &[iv, c], size_data); if d != expected_d { - warn!("Mismatched MAC") + debug!("Mismatched MAC") } // Decrypt the message with the AES key. 
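A note on the retry scheme the next hunk tightens: the lowered MAX_RETRIES only trips after consecutive failures, because the counter introduced in PATCH 082 is reset to zero on every successful account-range response. A minimal sketch of that reset-on-success loop, assuming hypothetical request_range and apply_range helpers standing in for the peer-channel request and the trie updates performed in rebuild_state_trie:

    const MAX_RETRIES: usize = 5;

    // Sketch only: request_range stands in for the snap request to a peer,
    // apply_range for the trie updates done in rebuild_state_trie.
    async fn fetch_account_ranges(mut start: H256) -> Result<bool, SyncError> {
        let mut retry_count = 0;
        while retry_count <= MAX_RETRIES {
            match request_range(start).await {
                Some((last_hash, should_continue)) => {
                    // A valid response proves the requested state is still
                    // available, so the staleness counter starts over.
                    retry_count = 0;
                    apply_range(start, last_hash)?;
                    if !should_continue {
                        return Ok(true); // all accounts fetched
                    }
                    start = last_hash;
                }
                // No (valid) response: the pivot state may have expired.
                None => retry_count += 1,
            }
        }
        // Retries exhausted: the caller should pick a fresher pivot.
        Ok(false)
    }
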
diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 4646a8672c..8fc2e2993b 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -21,7 +21,7 @@ use crate::{kademlia::KademliaTable, peer_channels::BlockRequestOrder}; /// Maximum amount of times we will ask a peer for an account/storage range /// If the max amount of retries is exceeded we will asume that the state we are requesting is old and no longer available -const MAX_RETRIES: usize = 10; +const MAX_RETRIES: usize = 5; /// The minimum amount of blocks from the head that we want to full sync during a snap sync const MIN_FULL_BLOCKS: usize = 64; From 27e01f516a4b035d035bd7f9d4eb147c2c58a6c3 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 17:30:01 -0300 Subject: [PATCH 089/189] Simplify logic (aka no longer care about bodies and receipts before the pivot) --- crates/networking/p2p/sync.rs | 63 ++++++++--------------- crates/storage/store/engines/api.rs | 12 ++--- crates/storage/store/engines/in_memory.rs | 20 ++----- crates/storage/store/engines/libmdbx.rs | 30 +++-------- crates/storage/store/engines/utils.rs | 21 +++----- crates/storage/store/storage.rs | 24 +++------ 6 files changed, 47 insertions(+), 123 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 48a6bc6905..257bb6bcf5 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -70,6 +70,7 @@ impl SyncManager { /// After the sync cycle is complete, the sync mode will be set to full /// If the sync fails, no error will be returned but a warning will be emitted /// [WARNING] Sync is done optimistically, so headers and bodies may be stored even if their data has not been fully synced if the sync is aborted halfway + /// [WARNING] Sync is currently simplified and will not download bodies + receipts previous to the pivot during snap sync pub async fn start_sync(&mut self, current_head: H256, sync_head: H256, store: Store) { info!("Syncing from current head {current_head} to sync_head {sync_head}"); let start_time = Instant::now(); @@ -101,21 +102,11 @@ impl SyncManager { // This step is not parallelized let mut all_block_hashes = vec![]; // Check if we have some blocks downloaded from a previous sync attempt - if let Some(last_header) = store.get_latest_downloaded_header()? { - // We might have more headers than bodies downloaded so we should queue missing bodies for download - let last_body = match store.get_latest_downloaded_body()? { - Some(hash) => hash, - None => current_head, - }; - if last_body != last_header { - let mut parent = last_header; - while parent != last_body { - all_block_hashes.insert(0, parent); - parent = store.get_block_header_by_hash(parent)?.unwrap().parent_hash; - } + if matches!(self.sync_mode, SyncMode::Snap) { + if let Some(last_header) = store.get_header_download_checkpoint()? 
{ + // Set latest downloaded header as current head for header fetching + current_head = last_header; } - // Set latest downloaded header as current head for header fetching - current_head = last_header; } loop { let peer = self @@ -144,7 +135,7 @@ impl SyncManager { if !sync_head_found { current_head = *block_hashes.last().unwrap(); // Update snap state - store.set_latest_downloaded_header(current_head)?; + store.set_header_download_checkpoint(current_head)?; } // Store headers and save hashes for full block retrieval all_block_hashes.extend_from_slice(&block_hashes[..]); @@ -163,11 +154,6 @@ impl SyncManager { // - Fetch each block's body and its receipt via eth p2p requests // - Fetch the pivot block's state via snap p2p requests // - Execute blocks after the pivote (like in full-sync) - let store_bodies_handle = tokio::spawn(store_block_bodies( - all_block_hashes.clone(), - self.peers.clone(), - store.clone(), - )); let mut pivot_idx = if all_block_hashes.len() > MIN_FULL_BLOCKS { all_block_hashes.len() - MIN_FULL_BLOCKS } else { @@ -176,6 +162,11 @@ impl SyncManager { let mut pivot_header = store .get_block_header_by_hash(all_block_hashes[pivot_idx])? .ok_or(SyncError::CorruptDB)?; + let store_bodies_handle = tokio::spawn(store_block_bodies( + all_block_hashes[pivot_idx..].to_vec(), + self.peers.clone(), + store.clone(), + )); let mut stale_pivot = !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone()) .await?; @@ -201,33 +192,23 @@ impl SyncManager { store_bodies_handle.await??; // For all blocks before the pivot: Store the bodies and fetch the receipts // For all blocks after the pivot: Process them fully - let store_receipts_handle = tokio::spawn(store_receipts( - all_block_hashes[pivot_idx..].to_vec(), - self.peers.clone(), - store.clone(), - )); - for hash in all_block_hashes.into_iter() { - let block = store.get_block_by_hash(hash)?.ok_or(SyncError::CorruptDB)?; - if block.header.number <= pivot_header.number { - store.set_canonical_block(block.header.number, hash)?; - store.add_block(block)?; - } else { - store.set_canonical_block(block.header.number, hash)?; - store.update_latest_block_number(block.header.number)?; - ethrex_blockchain::add_block(&block, &store)?; - } + for hash in &all_block_hashes[pivot_idx..] { + let block = store + .get_block_by_hash(*hash)? + .ok_or(SyncError::CorruptDB)?; + store.set_canonical_block(block.header.number, *hash)?; + store.update_latest_block_number(block.header.number)?; + ethrex_blockchain::add_block(&block, &store)?; } - store_receipts_handle.await??; self.last_snap_pivot = pivot_header.number; + // Finished a sync cycle without aborting halfway, clear current checkpoint + store.clear_header_download_checkpoint()?; } SyncMode::Full => { // full-sync: Fetch all block bodies and execute them sequentially to build the state download_and_run_blocks(all_block_hashes, self.peers.clone(), store.clone()).await? 
} } - // Finished a sync cycle without aborting halfway, clear current state (TODO: write pivot here too) - store.clear_latest_downloaded_header(); - store.clear_latest_downloaded_body(); Ok(()) } } @@ -285,8 +266,6 @@ async fn store_block_bodies( debug!(" Received {} Block Bodies", block_bodies.len()); // Track which bodies we have already fetched let current_block_hashes = block_hashes.drain(..block_bodies.len()); - // Update snap state - store.set_latest_downloaded_body(*current_block_hashes.as_ref().last().unwrap())?; // Add bodies to storage for (hash, body) in current_block_hashes.zip(block_bodies.into_iter()) { store.add_block_body(hash, body)?; @@ -302,6 +281,8 @@ async fn store_block_bodies( } /// Fetches all receipts for the given block hashes via p2p and stores them +// TODO: remove allow when used again +#[allow(unused)] async fn store_receipts( mut block_hashes: Vec, peers: Arc>, diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index 4901be6d1a..db3f90992d 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -252,15 +252,9 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { // Snap State methods - fn set_latest_downloaded_header(&self, block_hash: BlockHash) -> Result<(), StoreError>; + fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError>; - fn get_latest_downloaded_header(&self) -> Result, StoreError>; + fn get_header_download_checkpoint(&self) -> Result, StoreError>; - fn clear_latest_downloaded_header(&self) -> Result<(), StoreError>; - - fn set_latest_downloaded_body(&self, block_hash: BlockHash) -> Result<(), StoreError>; - - fn get_latest_downloaded_body(&self) -> Result, StoreError>; - - fn clear_latest_downloaded_body(&self) -> Result<(), StoreError>; + fn clear_header_download_checkpoint(&self) -> Result<(), StoreError>; } diff --git a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index ff823b05f7..ba0d69aa3a 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -437,33 +437,19 @@ impl StoreEngine for Store { Ok(()) } - fn set_latest_downloaded_header(&self, block_hash: BlockHash) -> Result<(), StoreError> { + fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError> { self.inner().snap_state.last_downloaded_header_hash = Some(block_hash); Ok(()) } - fn get_latest_downloaded_header(&self) -> Result, StoreError> { + fn get_header_download_checkpoint(&self) -> Result, StoreError> { Ok(self.inner().snap_state.last_downloaded_header_hash) } - fn clear_latest_downloaded_header(&self) -> Result<(), StoreError> { + fn clear_header_download_checkpoint(&self) -> Result<(), StoreError> { self.inner().snap_state.last_downloaded_header_hash = None; Ok(()) } - - fn set_latest_downloaded_body(&self, block_hash: BlockHash) -> Result<(), StoreError> { - self.inner().snap_state.last_downloaded_body_hash = Some(block_hash); - Ok(()) - } - - fn get_latest_downloaded_body(&self) -> Result, StoreError> { - Ok(self.inner().snap_state.last_downloaded_body_hash) - } - - fn clear_latest_downloaded_body(&self) -> Result<(), StoreError> { - self.inner().snap_state.last_downloaded_body_hash = None; - Ok(()) - } } impl Debug for Store { diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs index 423a6409c6..1e33eb20db 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ 
b/crates/storage/store/engines/libmdbx.rs @@ -526,40 +526,22 @@ impl StoreEngine for Store { Ok(receipts.into_iter().map(|receipt| receipt.to()).collect()) } - fn set_latest_downloaded_header(&self, block_hash: BlockHash) -> Result<(), StoreError> { + fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError> { self.write::( - SnapStateIndex::LatestDownloadedHeader, + SnapStateIndex::HeaderDownloadCheckpoint, block_hash.encode_to_vec(), ) } - fn get_latest_downloaded_header(&self) -> Result, StoreError> { - self.read::(SnapStateIndex::LatestDownloadedHeader)? + fn get_header_download_checkpoint(&self) -> Result, StoreError> { + self.read::(SnapStateIndex::HeaderDownloadCheckpoint)? .map(|ref h| BlockHash::decode(h)) .transpose() .map_err(StoreError::RLPDecode) } - fn clear_latest_downloaded_header(&self) -> Result<(), StoreError> { - self.delete::(SnapStateIndex::LatestDownloadedHeader) - } - - fn set_latest_downloaded_body(&self, block_hash: BlockHash) -> Result<(), StoreError> { - self.write::( - SnapStateIndex::LatestDownloadedBody, - block_hash.encode_to_vec(), - ) - } - - fn get_latest_downloaded_body(&self) -> Result, StoreError> { - self.read::(SnapStateIndex::LatestDownloadedBody)? - .map(|ref h| BlockHash::decode(h)) - .transpose() - .map_err(StoreError::RLPDecode) - } - - fn clear_latest_downloaded_body(&self) -> Result<(), StoreError> { - self.delete::(SnapStateIndex::LatestDownloadedBody) + fn clear_header_download_checkpoint(&self) -> Result<(), StoreError> { + self.delete::(SnapStateIndex::HeaderDownloadCheckpoint) } } diff --git a/crates/storage/store/engines/utils.rs b/crates/storage/store/engines/utils.rs index 3af5984acd..cc041bedc1 100644 --- a/crates/storage/store/engines/utils.rs +++ b/crates/storage/store/engines/utils.rs @@ -35,27 +35,20 @@ impl From for ChainDataIndex { } } -/// Represents the key for each unique value of the chain data stored in the db -// Stores chain-specific data such as chain id and latest finalized/pending/safe block number +/// Represents the key for each unique value of the snap state stored in the db +// Stores the snap state from previous sync cycles. 
Currently stores the header download checkpoint +//, but will later on also include the body download checkpoint and the last pivot used #[derive(Debug, Copy, Clone)] pub enum SnapStateIndex { - // Pivot used by the last completed snap sync cycle - LastPivot = 0, - // Hash of the last downloaded header in a previous sync that was aborted - LatestDownloadedHeader = 1, - // Hash of the last downloaded body in a previous sync that was aborted - LatestDownloadedBody = 2, + // Hash of the last downloaded header in a previous sync cycle that was aborted + HeaderDownloadCheckpoint = 0, } impl From for SnapStateIndex { fn from(value: u8) -> Self { match value { - x if x == SnapStateIndex::LastPivot as u8 => SnapStateIndex::LastPivot, - x if x == SnapStateIndex::LatestDownloadedHeader as u8 => { - SnapStateIndex::LatestDownloadedHeader - } - x if x == SnapStateIndex::LatestDownloadedBody as u8 => { - SnapStateIndex::LatestDownloadedBody + x if x == SnapStateIndex::HeaderDownloadCheckpoint as u8 => { + SnapStateIndex::HeaderDownloadCheckpoint } _ => panic!("Invalid value when casting to SnapDataIndex: {}", value), } diff --git a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs index 39e1422b74..81764749ca 100644 --- a/crates/storage/store/storage.rs +++ b/crates/storage/store/storage.rs @@ -1000,28 +1000,16 @@ impl Store { .is_some()) } - pub fn set_latest_downloaded_header(&self, block_hash: BlockHash) -> Result<(), StoreError> { - self.engine.set_latest_downloaded_header(block_hash) + pub fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError> { + self.engine.set_header_download_checkpoint(block_hash) } - pub fn get_latest_downloaded_header(&self) -> Result, StoreError> { - self.engine.get_latest_downloaded_header() + pub fn get_header_download_checkpoint(&self) -> Result, StoreError> { + self.engine.get_header_download_checkpoint() } - pub fn clear_latest_downloaded_header(&self) -> Result<(), StoreError> { - self.engine.clear_latest_downloaded_header() - } - - pub fn set_latest_downloaded_body(&self, block_hash: BlockHash) -> Result<(), StoreError> { - self.engine.set_latest_downloaded_body(block_hash) - } - - pub fn get_latest_downloaded_body(&self) -> Result, StoreError> { - self.engine.get_latest_downloaded_body() - } - - pub fn clear_latest_downloaded_body(&self) -> Result<(), StoreError> { - self.engine.clear_latest_downloaded_body() + pub fn clear_header_download_checkpoint(&self) -> Result<(), StoreError> { + self.engine.clear_header_download_checkpoint() } } From 4ce3e0c3255aaecd3c10feaae75d8a53298d8ee8 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 10:44:43 -0300 Subject: [PATCH 090/189] Use current SyncStatus when applying a fork choice update --- crates/networking/rpc/engine/fork_choice.rs | 21 ++++++++++------ crates/networking/rpc/rpc.rs | 28 ++++++++++++++++++++- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/crates/networking/rpc/engine/fork_choice.rs b/crates/networking/rpc/engine/fork_choice.rs index 42e1393c72..a223f12975 100644 --- a/crates/networking/rpc/engine/fork_choice.rs +++ b/crates/networking/rpc/engine/fork_choice.rs @@ -14,7 +14,7 @@ use crate::{ payload::PayloadStatus, }, utils::RpcRequest, - RpcApiContext, RpcErr, RpcHandler, + RpcApiContext, RpcErr, RpcHandler, SyncStatus, }; #[derive(Debug)] @@ -153,13 +153,20 @@ fn handle_forkchoice( fork_choice_state.safe_block_hash, fork_choice_state.finalized_block_hash ); + // Check if there is an ongoing sync before applying the forkchoice + 
let fork_choice_res = match context.sync_status()? { + // Apply current fork choice + SyncStatus::Inactive => apply_fork_choice( + &context.storage, + fork_choice_state.head_block_hash, + fork_choice_state.safe_block_hash, + fork_choice_state.finalized_block_hash, + ), + // Restart sync if needed + _ => Err(InvalidForkChoice::Syncing), + }; - match apply_fork_choice( - &context.storage, - fork_choice_state.head_block_hash, - fork_choice_state.safe_block_hash, - fork_choice_state.finalized_block_hash, - ) { + match fork_choice_res { Ok(head) => Ok(( Some(head), ForkChoiceResponse::from(PayloadStatus::valid_with_hash( diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index 336d0d896f..d194934b3e 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -62,7 +62,7 @@ mod web3; use axum::extract::State; use ethrex_net::types::Node; -use ethrex_storage::Store; +use ethrex_storage::{error::StoreError, Store}; #[derive(Debug, Clone)] pub struct RpcApiContext { @@ -73,6 +73,32 @@ pub struct RpcApiContext { syncer: Arc>, } +/// Describes the client's current sync status: +/// Inactive: There is no active sync process +/// Active: The client is currently syncing +/// Pending: The previous sync process became stale, awaiting restart +pub enum SyncStatus { + Inactive, + Active, + Pending, +} + +impl RpcApiContext { + /// Returns the engine's current sync status, see [SyncStatus] + pub fn sync_status(&self) -> Result { + // Try to get hold of the sync manager, if we can't then it means it is currently involved in a sync process + Ok(if self.syncer.try_lock().is_err() { + SyncStatus::Active + // Check if there is a checkpoint left from a previous aborted sync + } else if self.storage.get_header_download_checkpoint()?.is_some() { + SyncStatus::Pending + // No trace of a sync being handled + } else { + SyncStatus::Inactive + }) + } +} + trait RpcHandler: Sized { fn parse(params: &Option>) -> Result; From fc81ab26190fb250ded2db7fec4de1bbbf379b73 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 14 Jan 2025 11:36:55 -0300 Subject: [PATCH 091/189] Use current SyncStatus when applying a new payload --- crates/networking/p2p/sync.rs | 4 ++ crates/networking/rpc/engine/payload.rs | 51 +++++++++++++++---------- 2 files changed, 35 insertions(+), 20 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 257bb6bcf5..5fe7346cbd 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -162,6 +162,10 @@ impl SyncManager { let mut pivot_header = store .get_block_header_by_hash(all_block_hashes[pivot_idx])? .ok_or(SyncError::CorruptDB)?; 
.ok_or(SyncError::CorruptDB)?; + debug!( + "Selected block {} as pivot for snap sync", + pivot_header.number + ); let store_bodies_handle = tokio::spawn(store_block_bodies( all_block_hashes[pivot_idx..].to_vec(), self.peers.clone(), diff --git a/crates/networking/rpc/engine/payload.rs b/crates/networking/rpc/engine/payload.rs index 08f80292f7..de2cf115be 100644 --- a/crates/networking/rpc/engine/payload.rs +++ b/crates/networking/rpc/engine/payload.rs @@ -8,7 +8,7 @@ use tracing::{error, info, warn}; use crate::types::payload::{ExecutionPayload, ExecutionPayloadResponse, PayloadStatus}; use crate::utils::RpcRequest; -use crate::{RpcApiContext, RpcErr, RpcHandler}; +use crate::{RpcApiContext, RpcErr, RpcHandler, SyncStatus}; // NewPayload V1-V2-V3 implementations pub struct NewPayloadV1Request { @@ -92,20 +92,28 @@ impl RpcHandler for NewPayloadV3Request { let block = get_block_from_payload(&self.payload, Some(self.parent_beacon_block_root))?; validate_fork(&block, Fork::Cancun, &context)?; let payload_status = { - if let Err(RpcErr::Internal(error_msg)) = validate_block_hash(&self.payload, &block) { - PayloadStatus::invalid_with_err(&error_msg) - } else { - let blob_versioned_hashes: Vec = block - .body - .transactions - .iter() - .flat_map(|tx| tx.blob_versioned_hashes()) - .collect(); - - if self.expected_blob_versioned_hashes != blob_versioned_hashes { - PayloadStatus::invalid_with_err("Invalid blob_versioned_hashes") - } else { - execute_payload(&block, &context)? + // Ignore incoming + match context.sync_status()? { + SyncStatus::Active | SyncStatus::Pending => PayloadStatus::syncing(), + SyncStatus::Inactive => { + if let Err(RpcErr::Internal(error_msg)) = + validate_block_hash(&self.payload, &block) + { + PayloadStatus::invalid_with_err(&error_msg) + } else { + let blob_versioned_hashes: Vec = block + .body + .transactions + .iter() + .flat_map(|tx| tx.blob_versioned_hashes()) + .collect(); + + if self.expected_blob_versioned_hashes != blob_versioned_hashes { + PayloadStatus::invalid_with_err("Invalid blob_versioned_hashes") + } else { + execute_payload(&block, &context)? + } + } } } }; @@ -195,11 +203,14 @@ fn handle_new_payload_v1_v2( ) -> Result { let block = get_block_from_payload(payload, None)?; validate_fork(&block, fork, &context)?; - let payload_status = { - if let Err(RpcErr::Internal(error_msg)) = validate_block_hash(payload, &block) { - PayloadStatus::invalid_with_err(&error_msg) - } else { - execute_payload(&block, &context)? + let payload_status = match context.sync_status()? { + SyncStatus::Active | SyncStatus::Pending => PayloadStatus::syncing(), + SyncStatus::Inactive => { + if let Err(RpcErr::Internal(error_msg)) = validate_block_hash(payload, &block) { + PayloadStatus::invalid_with_err(&error_msg) + } else { + execute_payload(&block, &context)? 
+ } } }; serde_json::to_value(payload_status).map_err(|error| RpcErr::Internal(error.to_string())) From 730bb5e316b0b9df9deab160ca8e75f6f0ebff45 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 10:25:48 -0300 Subject: [PATCH 092/189] Mute non-sync logs --- crates/networking/p2p/kademlia.rs | 2 +- crates/networking/p2p/net.rs | 8 +++++--- crates/networking/p2p/rlpx/connection.rs | 8 ++++++-- crates/networking/rpc/engine/fork_choice.rs | 2 +- crates/networking/rpc/eth/block.rs | 2 +- crates/networking/rpc/eth/client.rs | 2 +- crates/networking/rpc/rpc.rs | 4 ++-- 7 files changed, 17 insertions(+), 11 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 63af382410..6c90c2d858 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -308,7 +308,7 @@ impl KademliaTable { /// The peer is selected randomly, and doesn't guarantee that the selected peer is not currenlty busy /// If no peer is found, this method will try again after 10 seconds pub async fn get_peer_channels(&self, capability: Capability) -> PeerChannels { - self.show_peer_stats(); + //self.show_peer_stats(); let filter = |peer: &PeerData| -> bool { // Search for peers with an active connection that support the required capabilities peer.channels.is_some() && peer.supported_capabilities.contains(&capability) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 30cf359ca8..b4b88ca2df 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -816,15 +816,17 @@ async fn handle_peer_as_initiator( connection_broadcast: broadcast::Sender<(tokio::task::Id, Arc)>, ) { debug!("Trying RLPx connection with {node:?}"); - let stream = TcpSocket::new_v4() + let Ok(stream) = TcpSocket::new_v4() .unwrap() .connect(SocketAddr::new(node.ip, node.tcp_port)) .await - .unwrap(); + else { + return; + }; match RLPxConnection::initiator(signer, msg, stream, storage, connection_broadcast).await { Ok(mut conn) => conn.start_peer(table).await, Err(e) => { - error!("Error: {e}, Could not start connection with {node:?}"); + debug!("Error: {e}, Could not start connection with {node:?}"); } } } diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index f2463d6c1a..494becd1dd 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{io::Read, sync::Arc}; use crate::{ peer_channels::PeerChannels, @@ -196,7 +196,7 @@ impl RLPxConnection { .unwrap_or_else(|e| debug!("Could not send Disconnect message: ({e}).")); if let Ok(node_id) = self.get_remote_node_id() { // Discard peer from kademlia table - error!("{error_text}: ({error}), discarding peer {node_id}"); + debug!("{error_text}: ({error}), discarding peer {node_id}"); table.lock().await.replace_peer(node_id); } else { debug!("{error_text}: ({error}), unknown peer") @@ -637,6 +637,10 @@ impl RLPxConnection { let msg_size = u16::from_be_bytes(ack_data) as usize; // Read the rest of the message + // Guard unwrap + if buf.len() < msg_size + 2 { + return Err(RLPxError::ConnectionError(String::from("bad buf size"))); + } self.stream .read_exact(&mut buf[2..msg_size + 2]) .await diff --git a/crates/networking/rpc/engine/fork_choice.rs b/crates/networking/rpc/engine/fork_choice.rs index a223f12975..2d61a02fd2 100644 --- a/crates/networking/rpc/engine/fork_choice.rs +++ b/crates/networking/rpc/engine/fork_choice.rs @@ -146,7 +146,7 @@ fn 
handle_forkchoice( context: RpcApiContext, version: usize, ) -> Result<(Option, ForkChoiceResponse), RpcErr> { - info!( + debug!( "New fork choice request v{} with head: {:#x}, safe: {:#x}, finalized: {:#x}.", version, fork_choice_state.head_block_hash, diff --git a/crates/networking/rpc/eth/block.rs b/crates/networking/rpc/eth/block.rs index b226a390f5..e3e4ac8562 100644 --- a/crates/networking/rpc/eth/block.rs +++ b/crates/networking/rpc/eth/block.rs @@ -70,7 +70,7 @@ impl RpcHandler for GetBlockByNumberRequest { } fn handle(&self, context: RpcApiContext) -> Result { let storage = &context.storage; - info!("Requested block with number: {}", self.block); + debug!("Requested block with number: {}", self.block); let block_number = match self.block.resolve_block_number(storage)? { Some(block_number) => block_number, _ => return Ok(Value::Null), diff --git a/crates/networking/rpc/eth/client.rs b/crates/networking/rpc/eth/client.rs index f090b0c701..7ef14eec57 100644 --- a/crates/networking/rpc/eth/client.rs +++ b/crates/networking/rpc/eth/client.rs @@ -10,7 +10,7 @@ impl RpcHandler for ChainId { } fn handle(&self, context: RpcApiContext) -> Result { - info!("Requested chain id"); + debug!("Requested chain id"); let chain_spec = context .storage .get_chain_config() diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index d194934b3e..e264b6aeb1 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -143,9 +143,9 @@ pub async fn start_api( let filters = active_filters.clone(); loop { interval.tick().await; - tracing::info!("Running filter clean task"); + tracing::debug!("Running filter clean task"); filter::clean_outdated_filters(filters.clone(), FILTER_DURATION); - tracing::info!("Filter clean task complete"); + tracing::debug!("Filter clean task complete"); } }); From 7145b9278608338b7f75293709558831f4a0834f Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 10:29:16 -0300 Subject: [PATCH 093/189] Fix --- crates/networking/rpc/eth/block.rs | 2 +- crates/networking/rpc/eth/client.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/rpc/eth/block.rs b/crates/networking/rpc/eth/block.rs index e3e4ac8562..a45545d806 100644 --- a/crates/networking/rpc/eth/block.rs +++ b/crates/networking/rpc/eth/block.rs @@ -1,7 +1,7 @@ use ethrex_blockchain::find_parent_header; use ethrex_rlp::encode::RLPEncode; use serde_json::Value; -use tracing::info; +use tracing::{info, debug}; use crate::{ types::{ diff --git a/crates/networking/rpc/eth/client.rs b/crates/networking/rpc/eth/client.rs index 7ef14eec57..e143e052ac 100644 --- a/crates/networking/rpc/eth/client.rs +++ b/crates/networking/rpc/eth/client.rs @@ -1,5 +1,5 @@ use serde_json::Value; -use tracing::info; +use tracing::{info, debug}; use crate::{utils::RpcErr, RpcApiContext, RpcHandler}; From 9c03b78dfbbca50bcfe7547f0eb597d0e7990f79 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 10:30:00 -0300 Subject: [PATCH 094/189] Fix --- crates/networking/rpc/engine/fork_choice.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/rpc/engine/fork_choice.rs b/crates/networking/rpc/engine/fork_choice.rs index 2d61a02fd2..53ec50e27f 100644 --- a/crates/networking/rpc/engine/fork_choice.rs +++ b/crates/networking/rpc/engine/fork_choice.rs @@ -6,7 +6,7 @@ use ethrex_blockchain::{ }; use ethrex_core::types::BlockHeader; use serde_json::Value; -use tracing::{info, warn}; +use tracing::{info, warn, debug}; use crate::{ types::{ 
From 36f26eead115e3a01e6b49278839ee6049c4e9ef Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 10:32:41 -0300 Subject: [PATCH 095/189] Mute non-sync logs --- crates/networking/p2p/rlpx/connection.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 494becd1dd..d4291e6ba4 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -658,12 +658,12 @@ impl RLPxConnection { if let RLPxConnectionState::Established(state) = &mut self.state { let mut frame_buffer = vec![]; message.encode(&mut frame_buffer)?; - let ret = frame::write(frame_buffer, state, &mut self.stream).await; - if ret.is_err() { - let node_id = self.get_remote_node_id()?; - warn!("Failed to send message: {message:?}, to: {node_id}"); - } - ret + frame::write(frame_buffer, state, &mut self.stream).await + // if ret.is_err() { + // let node_id = self.get_remote_node_id()?; + // warn!("Failed to send message: {message:?}, to: {node_id}"); + // } + // ret } else { Err(RLPxError::InvalidState()) } From fb4fb3e6037e47a3c85dbf0be41a97a161e594eb Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 10:51:11 -0300 Subject: [PATCH 096/189] Periodically show peer stats --- cmd/ethrex/ethrex.rs | 4 +++- crates/networking/p2p/net.rs | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index c9131eb56f..7283187b0f 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -258,7 +258,7 @@ async fn main() { tcp_socket_addr, bootnodes, signer, - peer_table, + peer_table.clone(), store, ) .into_future(); @@ -266,6 +266,8 @@ async fn main() { } } + tracker.spawn(ethrex_net::peridically_show_peer_stats(peer_table)); + tokio::select! { _ = tokio::signal::ctrl_c() => { info!("Server shut down started..."); diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index b4b88ca2df..de20b7e23b 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -837,6 +837,16 @@ pub fn node_id_from_signing_key(signer: &SigningKey) -> H512 { H512::from_slice(&encoded.as_bytes()[1..]) } + +pub async fn peridically_show_peer_stats(peer_table: Arc>) { + const INTERVAL_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(30); + let mut interval = tokio::time::interval(INTERVAL_DURATION); + loop { + peer_table.lock().await.show_peer_stats(); + interval.tick().await; + } +} + #[cfg(test)] mod tests { use super::*; From 824f6230594c75438323f690efa3b6f067bc362e Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 10:51:11 -0300 Subject: [PATCH 097/189] Periodically show peer stats --- cmd/ethrex/ethrex.rs | 4 +++- crates/networking/p2p/net.rs | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index c9131eb56f..7283187b0f 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -258,7 +258,7 @@ async fn main() { tcp_socket_addr, bootnodes, signer, - peer_table, + peer_table.clone(), store, ) .into_future(); @@ -266,6 +266,8 @@ async fn main() { } } + tracker.spawn(ethrex_net::peridically_show_peer_stats(peer_table)); + tokio::select! 
{ _ = tokio::signal::ctrl_c() => { info!("Server shut down started..."); diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 2de5c1c695..5c726c0f4b 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -821,6 +821,16 @@ pub fn node_id_from_signing_key(signer: &SigningKey) -> H512 { H512::from_slice(&encoded.as_bytes()[1..]) } + +pub async fn peridically_show_peer_stats(peer_table: Arc>) { + const INTERVAL_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(30); + let mut interval = tokio::time::interval(INTERVAL_DURATION); + loop { + peer_table.lock().await.show_peer_stats(); + interval.tick().await; + } +} + #[cfg(test)] mod tests { use super::*; From de73c291d1b282cb612459eced4fd87cd59fab4f Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 10:54:50 -0300 Subject: [PATCH 098/189] Add comment --- crates/networking/p2p/net.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 5c726c0f4b..35e6567e17 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -821,7 +821,7 @@ pub fn node_id_from_signing_key(signer: &SigningKey) -> H512 { H512::from_slice(&encoded.as_bytes()[1..]) } - +/// Shows the amount of connected peers, active peers, and peers suitable for snap sync on a set interval pub async fn peridically_show_peer_stats(peer_table: Arc>) { const INTERVAL_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(30); let mut interval = tokio::time::interval(INTERVAL_DURATION); From b80d447a5ed2536d7936a240e73bffadeb557a68 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 10:57:06 -0300 Subject: [PATCH 099/189] Fix typo + move call to appropiate context --- cmd/ethrex/ethrex.rs | 3 +-- crates/networking/p2p/net.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 7283187b0f..10dec719b6 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -263,11 +263,10 @@ async fn main() { ) .into_future(); tracker.spawn(networking); + tracker.spawn(ethrex_net::periodically_show_peer_stats(peer_table)); } } - tracker.spawn(ethrex_net::peridically_show_peer_stats(peer_table)); - tokio::select! { _ = tokio::signal::ctrl_c() => { info!("Server shut down started..."); diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 35e6567e17..c940d15e89 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -822,7 +822,7 @@ pub fn node_id_from_signing_key(signer: &SigningKey) -> H512 { } /// Shows the amount of connected peers, active peers, and peers suitable for snap sync on a set interval -pub async fn peridically_show_peer_stats(peer_table: Arc>) { +pub async fn periodically_show_peer_stats(peer_table: Arc>) { const INTERVAL_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(30); let mut interval = tokio::time::interval(INTERVAL_DURATION); loop { From d5a37c4a3a80fa0ef325898135bebf233db73aff Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 12:06:58 -0300 Subject: [PATCH 100/189] Revert "Periodically show peer stats" This reverts commit fb4fb3e6037e47a3c85dbf0be41a97a161e594eb. 
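The periodic-stats task being reverted here is the standard tokio interval loop. A minimal sketch, assuming the tokio runtime used elsewhere in the tree and reducing the peer table to a stats-printing stand-in (names are illustrative):

use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::time::{interval, Duration};

struct PeerTable;

impl PeerTable {
    fn show_peer_stats(&self) {
        println!("connected/active/snap-capable peer counts would go here");
    }
}

async fn periodically_show_peer_stats(peer_table: Arc<Mutex<PeerTable>>) {
    const INTERVAL_DURATION: Duration = Duration::from_secs(30);
    let mut ticker = interval(INTERVAL_DURATION);
    loop {
        // The first tick completes immediately, so stats also print at startup.
        ticker.tick().await;
        peer_table.lock().await.show_peer_stats();
    }
}

#[tokio::main]
async fn main() {
    let table = Arc::new(Mutex::new(PeerTable));
    let handle = tokio::spawn(periodically_show_peer_stats(table));
    // The real node keeps this running for its whole lifetime; the example
    // just lets one tick through and then shuts the task down.
    tokio::time::sleep(Duration::from_millis(50)).await;
    handle.abort();
}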
--- cmd/ethrex/ethrex.rs | 4 +--- crates/networking/p2p/net.rs | 10 ---------- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 7283187b0f..c9131eb56f 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -258,7 +258,7 @@ async fn main() { tcp_socket_addr, bootnodes, signer, - peer_table.clone(), + peer_table, store, ) .into_future(); @@ -266,8 +266,6 @@ async fn main() { } } - tracker.spawn(ethrex_net::peridically_show_peer_stats(peer_table)); - tokio::select! { _ = tokio::signal::ctrl_c() => { info!("Server shut down started..."); diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index de20b7e23b..b4b88ca2df 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -837,16 +837,6 @@ pub fn node_id_from_signing_key(signer: &SigningKey) -> H512 { H512::from_slice(&encoded.as_bytes()[1..]) } - -pub async fn peridically_show_peer_stats(peer_table: Arc>) { - const INTERVAL_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(30); - let mut interval = tokio::time::interval(INTERVAL_DURATION); - loop { - peer_table.lock().await.show_peer_stats(); - interval.tick().await; - } -} - #[cfg(test)] mod tests { use super::*; From 7399f89c67fe5755d3be84005fdda99ad19dff27 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 12:31:54 -0300 Subject: [PATCH 101/189] Trace storage bottlenecks --- crates/networking/p2p/net.rs | 2 +- crates/networking/p2p/sync.rs | 14 +++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 79e905fc6f..394da40eb2 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -839,7 +839,7 @@ pub fn node_id_from_signing_key(signer: &SigningKey) -> H512 { /// Shows the amount of connected peers, active peers, and peers suitable for snap sync on a set interval pub async fn periodically_show_peer_stats(peer_table: Arc>) { - const INTERVAL_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(30); + const INTERVAL_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(200); let mut interval = tokio::time::interval(INTERVAL_DURATION); loop { peer_table.lock().await.show_peer_stats(); diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 8fc2e2993b..62b2edd44f 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -541,13 +541,16 @@ async fn storage_fetcher( let mut incoming = true; while incoming { // Fetch incoming requests + let awaiting_batch = Instant::now(); match receiver.recv().await { Some(account_hashes_and_roots) if !account_hashes_and_roots.is_empty() => { + info!("Spent {} secs waiting for incoming batch", awaiting_batch.elapsed().as_secs()); pending_storage.extend(account_hashes_and_roots); info!( "Received incoming storage range request, current batch: {}/{BATCH_SIZE}", pending_storage.len() - ) + ); + info!("Number of messages in receiver: {}", receiver.len()); } // Disconnect / Empty message signaling no more bytecodes to sync _ => { @@ -555,17 +558,23 @@ async fn storage_fetcher( incoming = false } } + info!("Processing current batches"); // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process while pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty() { + let now = Instant::now(); let next_batch = pending_storage 
.drain(..BATCH_SIZE.min(pending_storage.len())) .collect::>(); + let batch_size = next_batch.len(); let remaining = fetch_storage_batch(next_batch, state_root, peers.clone(), store.clone()).await?; + let remaining_size = remaining.len(); // Add unfeched bytecodes back to the queue pending_storage.extend(remaining); + info!("Processed Batch of size {} with {} remaing in {} secs", batch_size, remaining_size, now.elapsed().as_secs()) } + info!("Finished processing current batches"); } Ok(()) } @@ -611,10 +620,13 @@ async fn fetch_storage_batch( // For now we will fetch the full range again // Return remaining code hashes in the batch if we couldn't fetch all of them return Ok(batch); + } { + info!("Invalid/Empty batch, retrying") } } // This is a corner case where we fetched an account range for a block but the chain has moved on and the block // was dropped by the peer's snapshot. We will keep the fetcher alive to avoid errors and stop fetching as from the next account + info!("Pivot became stale but we cannot handle it here, on no!"); Ok(vec![]) } From d9654bd304610a01f75ab827fe1bb44705553e01 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 14:24:40 -0300 Subject: [PATCH 102/189] Trace storage bottlenecks --- crates/networking/p2p/sync.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 62b2edd44f..a479ec94bf 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -602,6 +602,7 @@ async fn fetch_storage_batch( let mut _last_range; // Hold on to the last batch (if incomplete) if incomplete { + info!("Last element in batch was not completely fetched"); // An incomplete range cannot be empty _last_range = (keys.pop().unwrap(), values.pop().unwrap()); } From 9b3900a3d720149852337860a1789399336765f2 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 14:45:50 -0300 Subject: [PATCH 103/189] Abort early if stale pivot on storage fetcher --- crates/networking/p2p/sync.rs | 14 +++++++++++--- crates/storage/trie/verify_range.rs | 16 ++++++++++++++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index a479ec94bf..80b7801438 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -444,6 +444,9 @@ async fn rebuild_state_trie( retry_count += 1; } } + if retry_count >= MAX_RETRIES { + return Err(SyncError::StalePivot) + } info!("Account Trie fully fetched, signaling storage fetcher process"); // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; @@ -527,12 +530,14 @@ async fn fetch_bytecode_batch( } /// Waits for incoming account hashes & storage roots from the receiver channel endpoint, queues them, and fetches and stores their bytecodes in batches +/// This function will remain active until either an empty vec is sent to the receiver or the pivot becomes stale +/// In the last case, the fetcher will return an internal SyncError::StalePivot error async fn storage_fetcher( mut receiver: Receiver>, peers: Arc>, store: Store, state_root: H256, -) -> Result<(), StoreError> { +) -> Result<(), SyncError> { const BATCH_SIZE: usize = 50; // Pending list of storages to fetch let mut pending_storage: Vec<(H256, H256)> = vec![]; @@ -585,7 +590,7 @@ async fn fetch_storage_batch( state_root: H256, peers: Arc>, store: Store, -) -> Result, StoreError> { +) -> Result, SyncError> { info!( "Requesting storage ranges for addresses {}..{}", 
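// A dependency-free sketch of the batching loop in `storage_fetcher` above:
// accumulate work until a full batch (or end of input), drain in BATCH_SIZE
// chunks, and re-queue whatever a fetch could not complete. std mpsc and
// u64 work items stand in for the tokio channel and (hash, root) pairs, and
// the Instant timing mirrors the tracing added in this patch.
use std::sync::mpsc;
use std::time::Instant;

const BATCH_SIZE: usize = 100;

fn batching_worker(rx: mpsc::Receiver<Vec<u64>>) {
    let mut pending: Vec<u64> = Vec::new();
    let mut incoming = true;
    while incoming {
        let awaiting_batch = Instant::now();
        match rx.recv() {
            // A non-empty message extends the queue...
            Ok(batch) if !batch.is_empty() => {
                println!("waited {:?} for incoming batch", awaiting_batch.elapsed());
                pending.extend(batch);
            }
            // ...an empty message (or a closed channel) signals shutdown.
            _ => incoming = false,
        }
        // Drain full batches, plus one final partial batch once input ends.
        while pending.len() >= BATCH_SIZE || (!incoming && !pending.is_empty()) {
            let batch: Vec<u64> = pending.drain(..BATCH_SIZE.min(pending.len())).collect();
            // A real fetcher would push unfetched items back onto `pending` here.
            println!("processed batch of {}", batch.len());
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    tx.send((0..250).collect()).unwrap();
    tx.send(Vec::new()).unwrap(); // empty batch: the shutdown sentinel
    batching_worker(rx);
}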
batch.first().unwrap().0, @@ -622,7 +627,7 @@ async fn fetch_storage_batch( // Return remaining code hashes in the batch if we couldn't fetch all of them return Ok(batch); } { - info!("Invalid/Empty batch, retrying") + return Err(SyncError::StalePivot) } } // This is a corner case where we fetched an account range for a block but the chain has moved on and the block @@ -847,4 +852,7 @@ enum SyncError { JoinHandle(#[from] tokio::task::JoinError), #[error("Missing data from DB")] CorruptDB, + // This is an internal signal for fetcher processes and should not be returned by the main sync cycle + #[error("[INTERNAL] Stale Pivot")] + StalePivot, } diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index f6c29ca47c..bece7b341a 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -7,10 +7,22 @@ use tracing::{info, warn}; use crate::{ nibbles::Nibbles, node::Node, node_hash::NodeHash, state::TrieState, Trie, TrieError, ValueRLP, }; - +pub fn verify_range( + root: H256, + first_key: &H256, + keys: &[H256], + values: &[ValueRLP], + proof: &[Vec], +) -> Result { + let e = verify_range_i(root, first_key, keys, values, proof); + if let Err(ref e) = e { + warn!("Verify range failure: {e}"); + } + e +} /// Verifies that the key value range belongs to the trie with the given root given the edge proofs for the range /// Also returns true if there is more state to be fetched (aka if there are more keys to the right of the given range) -pub fn verify_range( +pub fn verify_range_i( root: H256, first_key: &H256, keys: &[H256], From 20f9f47e827cea6d1176720cc179af9bd4e23a47 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 14:59:52 -0300 Subject: [PATCH 104/189] Adjust batch sizes --- crates/networking/p2p/sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 80b7801438..6af7b98b93 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -476,7 +476,7 @@ async fn bytecode_fetcher( peers: Arc>, store: Store, ) -> Result<(), SyncError> { - const BATCH_SIZE: usize = 200; + const BATCH_SIZE: usize = 100; let mut pending_bytecodes: Vec = vec![]; let mut incoming = true; while incoming { @@ -538,7 +538,7 @@ async fn storage_fetcher( store: Store, state_root: H256, ) -> Result<(), SyncError> { - const BATCH_SIZE: usize = 50; + const BATCH_SIZE: usize = 100; // Pending list of storages to fetch let mut pending_storage: Vec<(H256, H256)> = vec![]; // TODO: Also add a queue for storages that were incompletely fecthed, From 3db8c76ae68303d5fa8744714a2a0fb031b44833 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 17:29:25 -0300 Subject: [PATCH 105/189] Handle large storage batch --- crates/networking/p2p/peer_channels.rs | 68 ++++++++++++- crates/networking/p2p/sync.rs | 107 ++++++++++++++++---- crates/networking/rpc/engine/fork_choice.rs | 2 +- crates/networking/rpc/eth/block.rs | 2 +- crates/networking/rpc/eth/client.rs | 2 +- 5 files changed, 159 insertions(+), 22 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 2f8bfb5c36..ed2dcb3e1b 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -276,7 +276,7 @@ impl PeerChannels { /// Requests storage ranges for accounts given their hashed address and storage roots, and the root of their state trie /// account_hashes & storage_roots must have the same 
length /// storage_roots must not contain empty trie hashes, we will treat empty ranges as invalid responses - /// Returns true if the last accoun't storage was not completely fetched by the request + /// Returns true if the last account's storage was not completely fetched by the request /// Returns the list of hashed storage keys and values for each account's storage or None if: /// - There are no available peers (the node just started up or was rejected by all other nodes) /// - The response timed out @@ -349,7 +349,7 @@ impl PeerChannels { verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &proof) .ok()?; } else { - verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &[]).ok()?; + verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &[]).ok(); } storage_keys.push(hahsed_keys); @@ -465,4 +465,68 @@ impl PeerChannels { }) .flatten() } + + /// Requests a single storage range for an accouns given its hashed address and storage root, and the root of its state trie + /// This is a simplified version of `request_storage_range` meant to be used for large tries that require their own single requests + /// account_hashes & storage_roots must have the same length + /// storage_root must not be an empty trie hash, we will treat empty ranges as invalid responses + /// Returns true if the account's storage was not completely fetched by the request + /// Returns the list of hashed storage keys and values for the account's storage or None if: + /// - There are no available peers (the node just started up or was rejected by all other nodes) + /// - The response timed out + /// - The response was empty or not valid + pub async fn request_storage_range( + &self, + state_root: H256, + storage_root: H256, + account_hash: H256, + start: H256, + ) -> Option<(Vec, Vec, bool)> { + let request_id = rand::random(); + let request = RLPxMessage::GetStorageRanges(GetStorageRanges { + id: request_id, + root_hash: state_root, + account_hashes: vec![account_hash], + starting_hash: start, + limit_hash: HASH_MAX, + response_bytes: MAX_RESPONSE_BYTES, + }); + let mut receiver = self.receiver.lock().await; + self.sender.send(request).await.ok()?; + let (mut slots, proof) = tokio::time::timeout(PEER_REPLY_TIMOUT, async move { + loop { + match receiver.recv().await { + Some(RLPxMessage::StorageRanges(StorageRanges { id, slots, proof })) + if id == request_id => + { + return Some((slots, proof)) + } + // Ignore replies that don't match the expected id (such as late responses) + Some(_) => continue, + None => return None, + } + } + }) + .await + .ok()??; + // Check we got a reasonable amount of storage ranges + if slots.len() != 1 { + return None; + } + // Unzip & validate response + let proof = encodable_to_proof(&proof); + let (storage_keys, storage_values): (Vec, Vec) = slots + .remove(0) + .into_iter() + .map(|slot| (slot.hash, slot.data)) + .unzip(); + let encoded_values = storage_values + .iter() + .map(|val| val.encode_to_vec()) + .collect::>(); + // Verify storage range + let should_continue = + verify_range(storage_root, &start, &storage_keys, &encoded_values, &proof).ok()?; + Some((storage_keys, storage_values, should_continue)) + } } diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 6af7b98b93..8fc1345320 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -1,7 +1,7 @@ use ethrex_blockchain::error::ChainError; use ethrex_core::{ types::{AccountState, Block, BlockHash, EMPTY_KECCACK_HASH}, - H256, + H256, U256, }; 
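// The continuation contract of `request_storage_range` above, in isolation:
// keep re-requesting from the last key received while the proof-backed
// `should_continue` flag says there is more data to the right, so consecutive
// chunks overlap by exactly one key. A toy in-memory "peer" stands in for the
// network and for verify_range; all names are illustrative.
struct Chunk {
    keys: Vec<u32>,
    should_continue: bool,
}

// One simulated round-trip against a fixed, sorted key set.
fn fetch_range(all_keys: &[u32], start: u32, limit: usize) -> Chunk {
    let keys: Vec<u32> = all_keys
        .iter()
        .copied()
        .filter(|k| *k >= start)
        .take(limit)
        .collect();
    let last = *keys.last().unwrap();
    Chunk {
        should_continue: all_keys.iter().any(|k| *k > last),
        keys,
    }
}

fn main() {
    let all_keys: Vec<u32> = (0..1000).collect();
    let mut fetched = Vec::new();
    let mut next_key = 0;
    loop {
        let chunk = fetch_range(&all_keys, next_key, 128);
        // The next request starts at the last key received, so each chunk
        // after the first repeats one already-seen key.
        next_key = *chunk.keys.last().unwrap();
        let done = !chunk.should_continue;
        fetched.extend(chunk.keys);
        if done {
            break;
        }
    }
    fetched.dedup(); // drop the one-key overlaps between chunks
    assert_eq!(fetched.len(), 1000);
}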
use ethrex_rlp::{decode::RLPDecode, encode::RLPEncode, error::RLPDecodeError}; use ethrex_storage::{error::StoreError, Store}; @@ -444,8 +444,8 @@ async fn rebuild_state_trie( retry_count += 1; } } - if retry_count >= MAX_RETRIES { - return Err(SyncError::StalePivot) + if retry_count > MAX_RETRIES { + return Err(SyncError::StalePivot); } info!("Account Trie fully fetched, signaling storage fetcher process"); // Send empty batch to signal that no more batches are incoming @@ -549,7 +549,10 @@ async fn storage_fetcher( let awaiting_batch = Instant::now(); match receiver.recv().await { Some(account_hashes_and_roots) if !account_hashes_and_roots.is_empty() => { - info!("Spent {} secs waiting for incoming batch", awaiting_batch.elapsed().as_secs()); + info!( + "Spent {} secs waiting for incoming batch", + awaiting_batch.elapsed().as_secs() + ); pending_storage.extend(account_hashes_and_roots); info!( "Received incoming storage range request, current batch: {}/{BATCH_SIZE}", @@ -577,7 +580,12 @@ async fn storage_fetcher( let remaining_size = remaining.len(); // Add unfeched bytecodes back to the queue pending_storage.extend(remaining); - info!("Processed Batch of size {} with {} remaing in {} secs", batch_size, remaining_size, now.elapsed().as_secs()) + info!( + "Processed Batch of size {} with {} remaing in {} secs", + batch_size, + remaining_size, + now.elapsed().as_secs() + ) } info!("Finished processing current batches"); } @@ -604,12 +612,27 @@ async fn fetch_storage_batch( .await { info!("Received {} storage ranges", keys.len()); - let mut _last_range; - // Hold on to the last batch (if incomplete) + // Handle incomplete ranges if incomplete { info!("Last element in batch was not completely fetched"); - // An incomplete range cannot be empty - _last_range = (keys.pop().unwrap(), values.pop().unwrap()); + // If only one incomplete range is returned then it must belong to a trie that is too big to fit into one request + // We will handle this large trie separately + if keys.len() == 1 { + // An incomplete range cannot be empty + let (keys, values) = (keys.pop().unwrap(), values.pop().unwrap()); + let (account_hash, storage_root) = batch.remove(0); + handle_large_storage_range( + state_root, + account_hash, + storage_root, + keys, + values, + peers.clone(), + store.clone(), + ) + .await?; + } + // The incomplete range is not the first, we cannot asume it is a large trie, so lets add it back to the queue } // Store the storage ranges & rebuild the storage trie for each account for (keys, values) in keys.into_iter().zip(values.into_iter()) { @@ -622,18 +645,68 @@ async fn fetch_storage_batch( warn!("State sync failed for storage root {storage_root}"); } } - // TODO: if the last range is incomplete add it to the incomplete batches queue - // For now we will fetch the full range again // Return remaining code hashes in the batch if we couldn't fetch all of them return Ok(batch); - } { - return Err(SyncError::StalePivot) } } - // This is a corner case where we fetched an account range for a block but the chain has moved on and the block - // was dropped by the peer's snapshot. 
We will keep the fetcher alive to avoid errors and stop fetching as from the next account - info!("Pivot became stale but we cannot handle it here, on no!"); - Ok(vec![]) + // Pivot became stale + Err(SyncError::StalePivot) +} + +/// Handles the returned incomplete storage range of a large storage trie and +/// fetches the rest of the trie using single requests +// TODO: Later on this method can be refactored to use a separate queue process +// instead of blocking the current thread for the remainder of the retrieval +async fn handle_large_storage_range( + state_root: H256, + account_hash: H256, + storage_root: H256, + keys: Vec, + values: Vec, + peers: Arc>, + store: Store, +) -> Result<(), SyncError> { + // First process the initial range + // Keep hold of the last key as this will be the first key of the next range + let mut next_key = *keys.last().unwrap(); + let mut current_root = { + let mut trie = store.open_storage_trie(account_hash, *EMPTY_TRIE_HASH); + for (key, value) in keys.into_iter().zip(values.into_iter()) { + trie.insert(key.0.to_vec(), value.encode_to_vec())?; + } + // Compute current root so we can extend this trie later + trie.hash()? + }; + let mut should_continue = true; + // Fetch the remaining range + let mut retry_count = 0; + while should_continue { + while retry_count <= MAX_RETRIES { + let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; + if let Some((keys, values, incomplete)) = peer + .request_storage_range(state_root, storage_root, account_hash, next_key) + .await + { + next_key = *keys.last().unwrap(); + should_continue = incomplete; + let mut trie = store.open_storage_trie(account_hash, current_root); + for (key, value) in keys.into_iter().zip(values.into_iter()) { + trie.insert(key.0.to_vec(), value.encode_to_vec())?; + } + // Compute current root so we can extend this trie later + current_root = trie.hash()?; + } else { + retry_count += 1; + } + } + } + if retry_count > MAX_RETRIES { + return Err(SyncError::StalePivot); + } + if current_root != storage_root { + warn!("State sync failed for storage root {storage_root}"); + } + Ok(()) } /// Heals the trie given its state_root by fetching any missing nodes in it via p2p diff --git a/crates/networking/rpc/engine/fork_choice.rs b/crates/networking/rpc/engine/fork_choice.rs index 53ec50e27f..e448e01863 100644 --- a/crates/networking/rpc/engine/fork_choice.rs +++ b/crates/networking/rpc/engine/fork_choice.rs @@ -6,7 +6,7 @@ use ethrex_blockchain::{ }; use ethrex_core::types::BlockHeader; use serde_json::Value; -use tracing::{info, warn, debug}; +use tracing::{debug, info, warn}; use crate::{ types::{ diff --git a/crates/networking/rpc/eth/block.rs b/crates/networking/rpc/eth/block.rs index a45545d806..76a4cc3013 100644 --- a/crates/networking/rpc/eth/block.rs +++ b/crates/networking/rpc/eth/block.rs @@ -1,7 +1,7 @@ use ethrex_blockchain::find_parent_header; use ethrex_rlp::encode::RLPEncode; use serde_json::Value; -use tracing::{info, debug}; +use tracing::{debug, info}; use crate::{ types::{ diff --git a/crates/networking/rpc/eth/client.rs b/crates/networking/rpc/eth/client.rs index e143e052ac..bcc3ef3349 100644 --- a/crates/networking/rpc/eth/client.rs +++ b/crates/networking/rpc/eth/client.rs @@ -1,5 +1,5 @@ use serde_json::Value; -use tracing::{info, debug}; +use tracing::{debug, info}; use crate::{utils::RpcErr, RpcApiContext, RpcHandler}; From 6c6753fe7e4d9cf7c50cea767dd5dac1e47aae74 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 17:31:42 -0300 Subject: [PATCH 106/189] 
Trace Handle large storage batch --- crates/networking/p2p/sync.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 8fc1345320..52059a7898 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -614,10 +614,10 @@ async fn fetch_storage_batch( info!("Received {} storage ranges", keys.len()); // Handle incomplete ranges if incomplete { - info!("Last element in batch was not completely fetched"); // If only one incomplete range is returned then it must belong to a trie that is too big to fit into one request // We will handle this large trie separately if keys.len() == 1 { + info!("Large storage trie encountered, handling separately"); // An incomplete range cannot be empty let (keys, values) = (keys.pop().unwrap(), values.pop().unwrap()); let (account_hash, storage_root) = batch.remove(0); @@ -682,6 +682,7 @@ async fn handle_large_storage_range( let mut retry_count = 0; while should_continue { while retry_count <= MAX_RETRIES { + info!("Fetching large storage trie, current key: {}", next_key); let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; if let Some((keys, values, incomplete)) = peer .request_storage_range(state_root, storage_root, account_hash, next_key) @@ -706,6 +707,7 @@ async fn handle_large_storage_range( if current_root != storage_root { warn!("State sync failed for storage root {storage_root}"); } + info!("Completely fetched large storage trie"); Ok(()) } From b67da8e9f0eabfbfa0b6daf5d76ba7368f350a04 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 17:36:18 -0300 Subject: [PATCH 107/189] Fix --- crates/networking/p2p/sync.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 52059a7898..f74ffcc26a 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -696,6 +696,7 @@ async fn handle_large_storage_range( } // Compute current root so we can extend this trie later current_root = trie.hash()?; + break; } else { retry_count += 1; } From a0ef349a4cc976d22e082efd6bb4112c48c80552 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 15 Jan 2025 17:40:44 -0300 Subject: [PATCH 108/189] Fix --- crates/networking/p2p/sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index f74ffcc26a..ceb3217c99 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -614,12 +614,12 @@ async fn fetch_storage_batch( info!("Received {} storage ranges", keys.len()); // Handle incomplete ranges if incomplete { + // An incomplete range cannot be empty + let (keys, values) = (keys.pop().unwrap(), values.pop().unwrap()); // If only one incomplete range is returned then it must belong to a trie that is too big to fit into one request // We will handle this large trie separately if keys.len() == 1 { info!("Large storage trie encountered, handling separately"); - // An incomplete range cannot be empty - let (keys, values) = (keys.pop().unwrap(), values.pop().unwrap()); let (account_hash, storage_root) = batch.remove(0); handle_large_storage_range( state_root, From d580dc23beb0c1839b62878ae2b4fc4d2c691e38 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 10:49:48 -0300 Subject: [PATCH 109/189] Update --- crates/networking/rpc/rpc.rs | 2 +- crates/storage/store/engines/in_memory.rs | 12 ++++-------- 2 files changed, 5 insertions(+), 9 deletions(-) diff 
--git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index d194934b3e..daa5f424a0 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -90,7 +90,7 @@ impl RpcApiContext { Ok(if self.syncer.try_lock().is_ok() { SyncStatus::Active // Check if there is a checkpoint left from a previous aborted sync - } else if self.storage.get_latest_downloaded_header()?.is_some() { + } else if self.storage.get_header_download_checkpoint()?.is_some() { SyncStatus::Pending // No trace of a sync being handled } else { diff --git a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index ba0d69aa3a..98c37bc752 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -57,12 +57,8 @@ struct ChainData { // Keeps track of the state left by the latest snap attempt #[derive(Default, Debug)] pub struct SnapState { - /// The last block number used as a pivot for snap-sync - last_snap_pivot: u64, /// Latest downloaded block header's hash from a previously aborted sync - last_downloaded_header_hash: Option, - /// Latest downloaded block body's hash from a previously aborted sync - last_downloaded_body_hash: Option, + header_download_checkpoint: Option, } impl Store { @@ -438,16 +434,16 @@ impl StoreEngine for Store { } fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError> { - self.inner().snap_state.last_downloaded_header_hash = Some(block_hash); + self.inner().snap_state.header_download_checkpoint = Some(block_hash); Ok(()) } fn get_header_download_checkpoint(&self) -> Result, StoreError> { - Ok(self.inner().snap_state.last_downloaded_header_hash) + Ok(self.inner().snap_state.header_download_checkpoint) } fn clear_header_download_checkpoint(&self) -> Result<(), StoreError> { - self.inner().snap_state.last_downloaded_header_hash = None; + self.inner().snap_state.header_download_checkpoint = None; Ok(()) } } From 283529a1ad4ef680f567a27fc60b5f31f845a1df Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 11:19:24 -0300 Subject: [PATCH 110/189] Update --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index ceb3217c99..0c16044bdb 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -611,7 +611,7 @@ async fn fetch_storage_batch( .request_storage_ranges(state_root, batch_roots, batch_hahses, H256::zero()) .await { - info!("Received {} storage ranges", keys.len()); + info!("Received {} storage ranges, last batch complete: {}", keys.len(), incomplete); // Handle incomplete ranges if incomplete { // An incomplete range cannot be empty From f8de62bdc6eac170fb9082f4cfaa3bdd95286a6d Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 11:24:21 -0300 Subject: [PATCH 111/189] Update --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 0c16044bdb..98ab001189 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -611,7 +611,7 @@ async fn fetch_storage_batch( .request_storage_ranges(state_root, batch_roots, batch_hahses, H256::zero()) .await { - info!("Received {} storage ranges, last batch complete: {}", keys.len(), incomplete); + info!("Received {} storage ranges, last batch incomplete: {}", keys.len(), incomplete); // Handle incomplete ranges if incomplete { // An incomplete 
range cannot be empty From d163535946f361707abffde3362f17c3c6f1ade0 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 12:09:02 -0300 Subject: [PATCH 112/189] Store state trie checkpoint --- crates/storage/store/engines/api.rs | 7 +++++++ crates/storage/store/engines/in_memory.rs | 16 ++++++++++++++++ crates/storage/store/engines/libmdbx.rs | 18 ++++++++++++++++++ crates/storage/store/engines/utils.rs | 7 ++++++- 4 files changed, 47 insertions(+), 1 deletion(-) diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index db3f90992d..a781ff2035 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -257,4 +257,11 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { fn get_header_download_checkpoint(&self) -> Result, StoreError>; fn clear_header_download_checkpoint(&self) -> Result<(), StoreError>; + + fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError>; + + fn get_state_trie_download_checkpoint(&self) -> Result, StoreError>; + + fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError>; + } diff --git a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index 98c37bc752..279c7b0b73 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -59,6 +59,8 @@ struct ChainData { pub struct SnapState { /// Latest downloaded block header's hash from a previously aborted sync header_download_checkpoint: Option, + /// Current root hash of the latest State Trie + the last downloaded key + state_trie_download_checkpoint: Option<(H256, H256)> } impl Store { @@ -446,6 +448,20 @@ impl StoreEngine for Store { self.inner().snap_state.header_download_checkpoint = None; Ok(()) } + + fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + self.inner().snap_state.state_trie_download_checkpoint = Some((current_root, last_key)); + Ok(()) + } + + fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { + Ok(self.inner().snap_state.state_trie_download_checkpoint) + } + + fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { + self.inner().snap_state.state_trie_download_checkpoint = None; + Ok(()) + } } impl Debug for Store { diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs index 1e33eb20db..46f7e3f1d5 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ b/crates/storage/store/engines/libmdbx.rs @@ -543,6 +543,24 @@ impl StoreEngine for Store { fn clear_header_download_checkpoint(&self) -> Result<(), StoreError> { self.delete::(SnapStateIndex::HeaderDownloadCheckpoint) } + + fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + self.write::( + SnapStateIndex::StateTrieDownloadCheckpoint, + (current_root, last_key).encode_to_vec(), + ) + } + + fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { + self.read::(SnapStateIndex::StateTrieDownloadCheckpoint)? 
+ .map(|ref h| <(H256, H256)>::decode(h)) + .transpose() + .map_err(StoreError::RLPDecode) + } + + fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { + self.delete::(SnapStateIndex::StateTrieDownloadCheckpoint) + } } impl Debug for Store { diff --git a/crates/storage/store/engines/utils.rs b/crates/storage/store/engines/utils.rs index cc041bedc1..87cee53b9b 100644 --- a/crates/storage/store/engines/utils.rs +++ b/crates/storage/store/engines/utils.rs @@ -36,12 +36,14 @@ impl From for ChainDataIndex { } /// Represents the key for each unique value of the snap state stored in the db -// Stores the snap state from previous sync cycles. Currently stores the header download checkpoint +// Stores the snap state from previous sync cycles. Currently stores the header & state trie download checkpoint //, but will later on also include the body download checkpoint and the last pivot used #[derive(Debug, Copy, Clone)] pub enum SnapStateIndex { // Hash of the last downloaded header in a previous sync cycle that was aborted HeaderDownloadCheckpoint = 0, + // Current root hash of the latest State Trie + the last downloaded key + StateTrieDownloadCheckpoint = 1, } impl From for SnapStateIndex { @@ -50,6 +52,9 @@ impl From for SnapStateIndex { x if x == SnapStateIndex::HeaderDownloadCheckpoint as u8 => { SnapStateIndex::HeaderDownloadCheckpoint } + x if x == SnapStateIndex::StateTrieDownloadCheckpoint as u8 => { + SnapStateIndex::StateTrieDownloadCheckpoint + } _ => panic!("Invalid value when casting to SnapDataIndex: {}", value), } } From 9c7b3952aac60d2e01de2ea350563adde38da98b Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 12:23:01 -0300 Subject: [PATCH 113/189] Use checkpoints in state trie download --- crates/networking/p2p/sync.rs | 52 +++++++++++---------------------- crates/storage/store/storage.rs | 12 ++++++++ 2 files changed, 29 insertions(+), 35 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 7e8877b207..384048b367 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -162,12 +162,12 @@ impl SyncManager { // - Fetch each block's body and its receipt via eth p2p requests // - Fetch the pivot block's state via snap p2p requests // - Execute blocks after the pivot (like in full-sync) - let mut pivot_idx = if all_block_hashes.len() > MIN_FULL_BLOCKS { + let pivot_idx = if all_block_hashes.len() > MIN_FULL_BLOCKS { all_block_hashes.len() - MIN_FULL_BLOCKS } else { all_block_hashes.len() - 1 }; - let mut pivot_header = store + let pivot_header = store .get_block_header_by_hash(all_block_hashes[pivot_idx])? .ok_or(SyncError::CorruptDB)?; info!( @@ -179,23 +179,9 @@ impl SyncManager { self.peers.clone(), store.clone(), )); - let mut stale_pivot = - !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone()) + let stale_pivot = + !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone(), store.get_state_trie_download_checkpoint()?) .await?; - // If the pivot became stale, set a further pivot and try again - if stale_pivot && pivot_idx != all_block_hashes.len() - 1 { - warn!("Stale pivot, switching to newer head"); - pivot_idx = all_block_hashes.len() - 1; - pivot_header = store - .get_block_header_by_hash(all_block_hashes[pivot_idx])? 
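// The shape of the snap-state checkpoint storage added in this patch: one
// fixed index key mapping to an encoded (current_root, last_key) tuple, with
// a set/get/clear triple around it. To stay dependency-free this sketch swaps
// the RLP tuple codec for plain concatenation of the two fixed-size hashes,
// so it is illustrative only, not the crate's real encoding.
use std::collections::HashMap;

type Hash = [u8; 32];

// Mirrors SnapStateIndex::StateTrieDownloadCheckpoint above.
const STATE_TRIE_DOWNLOAD_CHECKPOINT: u8 = 1;

#[derive(Default)]
struct SnapStateTable {
    rows: HashMap<u8, Vec<u8>>,
}

impl SnapStateTable {
    fn set_checkpoint(&mut self, current_root: Hash, last_key: Hash) {
        let mut buf = Vec::with_capacity(64);
        buf.extend_from_slice(&current_root);
        buf.extend_from_slice(&last_key);
        self.rows.insert(STATE_TRIE_DOWNLOAD_CHECKPOINT, buf);
    }

    fn get_checkpoint(&self) -> Option<(Hash, Hash)> {
        self.rows.get(&STATE_TRIE_DOWNLOAD_CHECKPOINT).map(|buf| {
            let mut root = [0u8; 32];
            let mut key = [0u8; 32];
            root.copy_from_slice(&buf[..32]);
            key.copy_from_slice(&buf[32..64]);
            (root, key)
        })
    }

    fn clear_checkpoint(&mut self) {
        self.rows.remove(&STATE_TRIE_DOWNLOAD_CHECKPOINT);
    }
}

fn main() {
    let mut table = SnapStateTable::default();
    table.set_checkpoint([0xaa; 32], [0x01; 32]);
    assert_eq!(table.get_checkpoint(), Some(([0xaa; 32], [0x01; 32])));
    table.clear_checkpoint();
    assert!(table.get_checkpoint().is_none());
}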
- .ok_or(SyncError::CorruptDB)?; - stale_pivot = !rebuild_state_trie( - pivot_header.state_root, - self.peers.clone(), - store.clone(), - ) - .await?; - } if stale_pivot { warn!("Stale pivot, aborting sync"); return Ok(()); @@ -320,12 +306,15 @@ async fn store_receipts( Ok(()) } -/// Rebuilds a Block's state trie by requesting snap state from peers, also performs state healing +/// Rebuilds a Block's state trie by requesting snap state from peers, also performs state healing (TODO) +/// Receives an optional checkpoint in case there was a previous snap sync process that became stale, in which +/// case it will continue from the checkpoint and then apply healing to fix inconsistencies with the older state /// Returns true if all state was fetched or false if the block is too old and the state is no longer available async fn rebuild_state_trie( state_root: H256, peers: Arc>, store: Store, + checkpoint: Option<(H256, H256)>, ) -> Result { info!("Rebuilding State Trie"); // Spawn storage & bytecode fetchers @@ -342,10 +331,10 @@ async fn rebuild_state_trie( store.clone(), state_root, )); - let mut start_account_hash = H256::zero(); - // Start from an empty state trie + // Resume download from checkpoint if available or start from an empty trie // We cannot keep an open trie here so we will track the root between lookups - let mut current_state_root = *EMPTY_TRIE_HASH; + let (mut current_state_root, mut start_account_hash) = checkpoint.unwrap_or((H256::zero(), *EMPTY_TRIE_HASH)); + info!("Starting/Resuming state trie download from key {start_account_hash}"); // Fetch Account Ranges // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available let mut retry_count = 0; @@ -413,29 +402,22 @@ async fn rebuild_state_trie( } } if retry_count > MAX_RETRIES { + // Store current checkpoint + store.set_state_trie_download_checkpoint(current_state_root, start_account_hash)?; return Err(SyncError::StalePivot); } info!("Account Trie fully fetched, signaling storage fetcher process"); // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; storage_fetcher_handle.await??; - info!("Current State Root: {current_state_root} vs Expected Root: {state_root}"); - let sync_complete = if current_state_root == state_root { - info!("Completed state sync for state root {state_root}"); - true - } else { - info!("Oh no! Trie needs healing"); - info!("Skipping state healing"); - true - // Perform state healing to fix any potential inconsistency in the rebuilt tries - // As we are not fetching different chunks of the same trie this step is not necessary - //heal_state_trie(bytecode_sender.clone(), state_root, store, peers).await? 
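// The bail-out shape `rebuild_state_trie` gains in this patch, in isolation:
// when the retry budget runs out, persist the current progress before
// surfacing the internal StalePivot error, so the next sync cycle resumes
// from the checkpoint instead of restarting from an empty trie. Closures
// stand in for the peer request and the store; names are illustrative.
#[derive(Debug, PartialEq)]
enum SyncError {
    StalePivot,
}

const MAX_RETRIES: u32 = 10;

fn download_range(
    mut fetch: impl FnMut(u64) -> Option<u64>,
    mut save_checkpoint: impl FnMut(u64),
    mut start_key: u64,
) -> Result<(), SyncError> {
    let mut retry_count = 0;
    while retry_count <= MAX_RETRIES {
        match fetch(start_key) {
            // A response that makes progress advances the cursor
            // (a real loop would also reset the retry counter here).
            Some(next_key) if next_key > start_key => start_key = next_key,
            // No progress left: the range is fully downloaded.
            Some(_) => return Ok(()),
            // Timeout / invalid response: burn one retry.
            None => retry_count += 1,
        }
    }
    // Retries exhausted: checkpoint current progress, then report staleness.
    save_checkpoint(start_key);
    Err(SyncError::StalePivot)
}

fn main() {
    let mut saved = None;
    // A peer set that always times out forces the stale-pivot path.
    let result = download_range(|_| None, |key| saved = Some(key), 7);
    assert_eq!(result, Err(SyncError::StalePivot));
    assert_eq!(saved, Some(7));
}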
- }; + // Perform state healing to fix inconsistencies with older state + info!("Healing"); + let res = heal_state_trie(bytecode_sender.clone(), state_root, store.clone(), peers.clone()).await?; // Send empty batch to signal that no more batches are incoming info!("Account Trie fully rebuilt, signaling bytecode fetcher process"); bytecode_sender.send(vec![]).await?; bytecode_fetcher_handle.await??; - Ok(sync_complete) + Ok(res) } /// Waits for incoming code hashes from the receiver channel endpoint, queues them, and fetches and stores their bytecodes in batches diff --git a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs index 81764749ca..1f9c050a44 100644 --- a/crates/storage/store/storage.rs +++ b/crates/storage/store/storage.rs @@ -1011,6 +1011,18 @@ impl Store { pub fn clear_header_download_checkpoint(&self) -> Result<(), StoreError> { self.engine.clear_header_download_checkpoint() } + + pub fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + self.engine.set_state_trie_download_checkpoint(current_root, last_key) + } + + pub fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { + self.engine.get_state_trie_download_checkpoint() + } + + pub fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { + self.engine.clear_state_trie_download_checkpoint() + } } pub fn hash_address(address: &Address) -> Vec { From e0d3fa7c363f2f25b93ca6ddb67e03e00a74a5ad Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 12:28:31 -0300 Subject: [PATCH 114/189] Debug --- crates/storage/store/engines/libmdbx.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs index 46f7e3f1d5..f8620ddfbc 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ b/crates/storage/store/engines/libmdbx.rs @@ -552,10 +552,9 @@ impl StoreEngine for Store { } fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { - self.read::(SnapStateIndex::StateTrieDownloadCheckpoint)? + Ok(self.read::(SnapStateIndex::StateTrieDownloadCheckpoint)? .map(|ref h| <(H256, H256)>::decode(h)) - .transpose() - .map_err(StoreError::RLPDecode) + .transpose().unwrap()) } fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { From 495bbbe3559e28e257c9ea8ec26ba80b60e11a0b Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 12:48:53 -0300 Subject: [PATCH 115/189] Debug --- crates/storage/store/engines/libmdbx.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs index f8620ddfbc..b9f2535d50 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ b/crates/storage/store/engines/libmdbx.rs @@ -547,14 +547,15 @@ impl StoreEngine for Store { fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { self.write::( SnapStateIndex::StateTrieDownloadCheckpoint, - (current_root, last_key).encode_to_vec(), + dbg!((current_root, last_key).encode_to_vec()), ) } fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { - Ok(self.read::(SnapStateIndex::StateTrieDownloadCheckpoint)? - .map(|ref h| <(H256, H256)>::decode(h)) - .transpose().unwrap()) + self.read::(SnapStateIndex::StateTrieDownloadCheckpoint)? 
+ .map(|ref h| <(H256, H256)>::decode(dbg!(h))) + .transpose() + .map_err(StoreError::RLPDecode) } fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { From ec548e3080a5ddcff439fbb0687bc1ea34a3d0c3 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 12:51:26 -0300 Subject: [PATCH 116/189] Debug --- cmd/ethrex/ethrex.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 10dec719b6..4f1572d31a 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -146,6 +146,8 @@ async fn main() { } } + store.clear_state_trie_download_checkpoint(); + let genesis = read_genesis_file(&network); store .add_initial_state(genesis.clone()) From b38fe36496df6f7b8f390233c3df38950f243b9d Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 13:01:48 -0300 Subject: [PATCH 117/189] Fix --- cmd/ethrex/ethrex.rs | 2 -- crates/networking/p2p/sync.rs | 2 +- crates/storage/store/engines/libmdbx.rs | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 4f1572d31a..10dec719b6 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -146,8 +146,6 @@ async fn main() { } } - store.clear_state_trie_download_checkpoint(); - let genesis = read_genesis_file(&network); store .add_initial_state(genesis.clone()) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 384048b367..2f0e534fb7 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -333,7 +333,7 @@ async fn rebuild_state_trie( )); // Resume download from checkpoint if available or start from an empty trie // We cannot keep an open trie here so we will track the root between lookups - let (mut current_state_root, mut start_account_hash) = checkpoint.unwrap_or((H256::zero(), *EMPTY_TRIE_HASH)); + let (mut current_state_root, mut start_account_hash) = checkpoint.unwrap_or((*EMPTY_TRIE_HASH, H256::zero())); info!("Starting/Resuming state trie download from key {start_account_hash}"); // Fetch Account Ranges // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs index b9f2535d50..46f7e3f1d5 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ b/crates/storage/store/engines/libmdbx.rs @@ -547,13 +547,13 @@ impl StoreEngine for Store { fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { self.write::( SnapStateIndex::StateTrieDownloadCheckpoint, - dbg!((current_root, last_key).encode_to_vec()), + (current_root, last_key).encode_to_vec(), ) } fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { self.read::(SnapStateIndex::StateTrieDownloadCheckpoint)? 
- .map(|ref h| <(H256, H256)>::decode(dbg!(h))) + .map(|ref h| <(H256, H256)>::decode(h)) .transpose() .map_err(StoreError::RLPDecode) } From 340ce4e70a2c5426ceeee313c7027be94cf95e3b Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 16:13:16 -0300 Subject: [PATCH 118/189] Add storage handling of pending healing lists --- crates/networking/p2p/sync.rs | 4 +- crates/storage/store/engines/api.rs | 29 ++++++++++++-- crates/storage/store/engines/in_memory.rs | 48 +++++++++++++++++++---- crates/storage/store/engines/libmdbx.rs | 44 +++++++++++++++++++-- crates/storage/store/engines/utils.rs | 10 +++++ crates/storage/store/storage.rs | 12 +++--- 6 files changed, 123 insertions(+), 24 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 2f0e534fb7..3daf7b8530 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -180,7 +180,7 @@ impl SyncManager { store.clone(), )); let stale_pivot = - !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone(), store.get_state_trie_download_checkpoint()?) + !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone(), store.get_state_download_checkpoint()?) .await?; if stale_pivot { warn!("Stale pivot, aborting sync"); @@ -403,7 +403,7 @@ async fn rebuild_state_trie( } if retry_count > MAX_RETRIES { // Store current checkpoint - store.set_state_trie_download_checkpoint(current_state_root, start_account_hash)?; + store.set_state_download_checkpoint(current_state_root, start_account_hash)?; return Err(SyncError::StalePivot); } info!("Account Trie fully fetched, signaling storage fetcher process"); diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index a781ff2035..9a115cf326 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -7,7 +7,7 @@ use ethrex_core::types::{ use std::{fmt::Debug, panic::RefUnwindSafe}; use crate::error::StoreError; -use ethrex_trie::Trie; +use ethrex_trie::{Nibbles, Trie}; pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { /// Add block header @@ -252,16 +252,37 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { // Snap State methods + // Header Download Checkpoint + fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError>; fn get_header_download_checkpoint(&self) -> Result, StoreError>; fn clear_header_download_checkpoint(&self) -> Result<(), StoreError>; - fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError>; + // State Trie Download Checkpoint + + fn set_state_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError>; + + fn get_state_download_checkpoint(&self) -> Result, StoreError>; + + fn clear_state_download_checkpoint(&self) -> Result<(), StoreError>; + + // State heal pending + + fn set_state_heal_pending(&self, paths: Vec) -> Result<(), StoreError>; + + fn get_state_heal_pending(&self) -> Result>, StoreError>; + + fn clear_state_heal_pending(&self) -> Result<(), StoreError>; + + // Storage heal pending + + fn set_storage_heal_pending(&self, paths: Vec<(H256, Nibbles)>) -> Result<(), StoreError>; + + fn get_storage_heal_pending(&self) -> Result>, StoreError>; - fn get_state_trie_download_checkpoint(&self) -> Result, StoreError>; + fn clear_storage_heal_pending(&self) -> Result<(), StoreError>; - fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError>; } diff --git 
a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index 279c7b0b73..e4c07d45c3 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -4,7 +4,7 @@ use ethereum_types::{H256, U256}; use ethrex_core::types::{ BlobsBundle, Block, BlockBody, BlockHash, BlockHeader, BlockNumber, ChainConfig, Index, Receipt, }; -use ethrex_trie::{InMemoryTrieDB, Trie}; +use ethrex_trie::{InMemoryTrieDB, Nibbles, Trie}; use std::{ collections::HashMap, fmt::Debug, @@ -60,7 +60,11 @@ pub struct SnapState { /// Latest downloaded block header's hash from a previously aborted sync header_download_checkpoint: Option, /// Current root hash of the latest State Trie + the last downloaded key - state_trie_download_checkpoint: Option<(H256, H256)> + state_download_checkpoint: Option<(H256, H256)>, + /// State trie paths that were left in the healing queue of a previous sync cycle + state_heal_pending: Option>, + /// Storage trie paths that were left in the healing queue of a previous sync cycle + storage_heal_pending: Option>, } impl Store { @@ -449,17 +453,45 @@ impl StoreEngine for Store { Ok(()) } - fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { - self.inner().snap_state.state_trie_download_checkpoint = Some((current_root, last_key)); + fn set_state_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + self.inner().snap_state.state_download_checkpoint = Some((current_root, last_key)); Ok(()) } - fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { - Ok(self.inner().snap_state.state_trie_download_checkpoint) + fn get_state_download_checkpoint(&self) -> Result, StoreError> { + Ok(self.inner().snap_state.state_download_checkpoint) } - fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { - self.inner().snap_state.state_trie_download_checkpoint = None; + fn clear_state_download_checkpoint(&self) -> Result<(), StoreError> { + self.inner().snap_state.state_download_checkpoint = None; + Ok(()) + } + + fn set_state_heal_pending(&self, paths: Vec) -> Result<(), StoreError> { + self.inner().snap_state.state_heal_pending = Some(paths); + Ok(()) + } + + fn get_state_heal_pending(&self) -> Result>, StoreError> { + Ok(self.inner().snap_state.state_heal_pending.clone()) + } + + fn clear_state_heal_pending(&self) -> Result<(), StoreError> { + self.inner().snap_state.state_heal_pending = None; + Ok(()) + } + + fn set_storage_heal_pending(&self, paths: Vec<(H256, Nibbles)>) -> Result<(), StoreError> { + self.inner().snap_state.storage_heal_pending = Some(paths); + Ok(()) + } + + fn get_storage_heal_pending(&self) -> Result>, StoreError> { + Ok(self.inner().snap_state.storage_heal_pending.clone()) + } + + fn clear_storage_heal_pending(&self) -> Result<(), StoreError> { + self.inner().snap_state.storage_heal_pending = None; Ok(()) } } diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs index 46f7e3f1d5..c6274d8c6e 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ b/crates/storage/store/engines/libmdbx.rs @@ -14,7 +14,7 @@ use ethrex_core::types::{ }; use ethrex_rlp::decode::RLPDecode; use ethrex_rlp::encode::RLPEncode; -use ethrex_trie::{LibmdbxDupsortTrieDB, LibmdbxTrieDB, Trie}; +use ethrex_trie::{LibmdbxDupsortTrieDB, LibmdbxTrieDB, Nibbles, Trie}; use libmdbx::orm::{Decodable, Encodable, Table}; use libmdbx::{ dupsort, @@ -544,23 +544,59 @@ impl StoreEngine for 
Store { self.delete::(SnapStateIndex::HeaderDownloadCheckpoint) } - fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + fn set_state_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { self.write::( SnapStateIndex::StateTrieDownloadCheckpoint, (current_root, last_key).encode_to_vec(), ) } - fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { + fn get_state_download_checkpoint(&self) -> Result, StoreError> { self.read::(SnapStateIndex::StateTrieDownloadCheckpoint)? .map(|ref h| <(H256, H256)>::decode(h)) .transpose() .map_err(StoreError::RLPDecode) } - fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { + fn clear_state_download_checkpoint(&self) -> Result<(), StoreError> { self.delete::(SnapStateIndex::StateTrieDownloadCheckpoint) } + + fn set_state_heal_pending(&self, paths: Vec) -> Result<(), StoreError> { + self.write::( + SnapStateIndex::StateTrieHealPending, + paths.encode_to_vec(), + ) + } + + fn get_state_heal_pending(&self) -> Result>, StoreError> { + self.read::(SnapStateIndex::StateTrieHealPending)? + .map(|ref h| >::decode(h)) + .transpose() + .map_err(StoreError::RLPDecode) + } + + fn clear_state_heal_pending(&self) -> Result<(), StoreError> { + self.delete::(SnapStateIndex::StateTrieHealPending) + } + + fn set_storage_heal_pending(&self, paths: Vec<(H256, Nibbles)>) -> Result<(), StoreError> { + self.write::( + SnapStateIndex::StorageTrieHealPending, + paths.encode_to_vec(), + ) + } + + fn get_storage_heal_pending(&self) -> Result>, StoreError> { + self.read::(SnapStateIndex::StorageTrieHealPending)? + .map(|ref h| >::decode(h)) + .transpose() + .map_err(StoreError::RLPDecode) + } + + fn clear_storage_heal_pending(&self) -> Result<(), StoreError> { + self.delete::(SnapStateIndex::StorageTrieHealPending) + } } impl Debug for Store { diff --git a/crates/storage/store/engines/utils.rs b/crates/storage/store/engines/utils.rs index 87cee53b9b..6ee749ff6f 100644 --- a/crates/storage/store/engines/utils.rs +++ b/crates/storage/store/engines/utils.rs @@ -44,6 +44,10 @@ pub enum SnapStateIndex { HeaderDownloadCheckpoint = 0, // Current root hash of the latest State Trie + the last downloaded key StateTrieDownloadCheckpoint = 1, + // State trie paths that were left in the healing queue of a previous sync cycle + StateTrieHealPending = 2, + // Storage trie paths that were left in the healing queue of a previous sync cycle + StorageTrieHealPending = 3, } impl From for SnapStateIndex { @@ -55,6 +59,12 @@ impl From for SnapStateIndex { x if x == SnapStateIndex::StateTrieDownloadCheckpoint as u8 => { SnapStateIndex::StateTrieDownloadCheckpoint } + x if x == SnapStateIndex::StateTrieHealPending as u8 => { + SnapStateIndex::StateTrieHealPending + } + x if x == SnapStateIndex::StorageTrieHealPending as u8 => { + SnapStateIndex::StorageTrieHealPending + } _ => panic!("Invalid value when casting to SnapDataIndex: {}", value), } } diff --git a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs index 1f9c050a44..c240351af8 100644 --- a/crates/storage/store/storage.rs +++ b/crates/storage/store/storage.rs @@ -1012,16 +1012,16 @@ impl Store { self.engine.clear_header_download_checkpoint() } - pub fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { - self.engine.set_state_trie_download_checkpoint(current_root, last_key) + pub fn set_state_download_checkpoint(&self, current_root: H256, last_key: 
H256) -> Result<(), StoreError> { + self.engine.set_state_download_checkpoint(current_root, last_key) } - pub fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { - self.engine.get_state_trie_download_checkpoint() + pub fn get_state_download_checkpoint(&self) -> Result, StoreError> { + self.engine.get_state_download_checkpoint() } - pub fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { - self.engine.clear_state_trie_download_checkpoint() + pub fn clear_state_download_checkpoint(&self) -> Result<(), StoreError> { + self.engine.clear_state_download_checkpoint() } } From 3dc9b1341f1e1e6e314c322176c5272edb3f46c1 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 16:18:24 -0300 Subject: [PATCH 119/189] Revert "Add storage handling of pending healing lists" This reverts commit 340ce4e70a2c5426ceeee313c7027be94cf95e3b. --- crates/networking/p2p/sync.rs | 4 +- crates/storage/store/engines/api.rs | 29 ++------------ crates/storage/store/engines/in_memory.rs | 48 ++++------------------- crates/storage/store/engines/libmdbx.rs | 44 ++------------------- crates/storage/store/engines/utils.rs | 10 ----- crates/storage/store/storage.rs | 12 +++--- 6 files changed, 24 insertions(+), 123 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 3daf7b8530..2f0e534fb7 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -180,7 +180,7 @@ impl SyncManager { store.clone(), )); let stale_pivot = - !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone(), store.get_state_download_checkpoint()?) + !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone(), store.get_state_trie_download_checkpoint()?) .await?; if stale_pivot { warn!("Stale pivot, aborting sync"); @@ -403,7 +403,7 @@ async fn rebuild_state_trie( } if retry_count > MAX_RETRIES { // Store current checkpoint - store.set_state_download_checkpoint(current_state_root, start_account_hash)?; + store.set_state_trie_download_checkpoint(current_state_root, start_account_hash)?; return Err(SyncError::StalePivot); } info!("Account Trie fully fetched, signaling storage fetcher process"); diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index 9a115cf326..a781ff2035 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -7,7 +7,7 @@ use ethrex_core::types::{ use std::{fmt::Debug, panic::RefUnwindSafe}; use crate::error::StoreError; -use ethrex_trie::{Nibbles, Trie}; +use ethrex_trie::Trie; pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { /// Add block header @@ -252,37 +252,16 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { // Snap State methods - // Header Download Checkpoint - fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError>; fn get_header_download_checkpoint(&self) -> Result, StoreError>; fn clear_header_download_checkpoint(&self) -> Result<(), StoreError>; - // State Trie Download Checkpoint - - fn set_state_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError>; - - fn get_state_download_checkpoint(&self) -> Result, StoreError>; - - fn clear_state_download_checkpoint(&self) -> Result<(), StoreError>; - - // State heal pending - - fn set_state_heal_pending(&self, paths: Vec) -> Result<(), StoreError>; - - fn get_state_heal_pending(&self) -> Result>, StoreError>; - - fn clear_state_heal_pending(&self) -> Result<(), 
StoreError>; - - // Storage heal pending - - fn set_storage_heal_pending(&self, paths: Vec<(H256, Nibbles)>) -> Result<(), StoreError>; - - fn get_storage_heal_pending(&self) -> Result>, StoreError>; + fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError>; - fn clear_storage_heal_pending(&self) -> Result<(), StoreError>; + fn get_state_trie_download_checkpoint(&self) -> Result, StoreError>; + fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError>; } diff --git a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index e4c07d45c3..279c7b0b73 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -4,7 +4,7 @@ use ethereum_types::{H256, U256}; use ethrex_core::types::{ BlobsBundle, Block, BlockBody, BlockHash, BlockHeader, BlockNumber, ChainConfig, Index, Receipt, }; -use ethrex_trie::{InMemoryTrieDB, Nibbles, Trie}; +use ethrex_trie::{InMemoryTrieDB, Trie}; use std::{ collections::HashMap, fmt::Debug, @@ -60,11 +60,7 @@ pub struct SnapState { /// Latest downloaded block header's hash from a previously aborted sync header_download_checkpoint: Option, /// Current root hash of the latest State Trie + the last downloaded key - state_download_checkpoint: Option<(H256, H256)>, - /// State trie paths that were left in the healing queue of a previous sync cycle - state_heal_pending: Option>, - /// Storage trie paths that were left in the healing queue of a previous sync cycle - storage_heal_pending: Option>, + state_trie_download_checkpoint: Option<(H256, H256)> } impl Store { @@ -453,45 +449,17 @@ impl StoreEngine for Store { Ok(()) } - fn set_state_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { - self.inner().snap_state.state_download_checkpoint = Some((current_root, last_key)); + fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + self.inner().snap_state.state_trie_download_checkpoint = Some((current_root, last_key)); Ok(()) } - fn get_state_download_checkpoint(&self) -> Result, StoreError> { - Ok(self.inner().snap_state.state_download_checkpoint) + fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { + Ok(self.inner().snap_state.state_trie_download_checkpoint) } - fn clear_state_download_checkpoint(&self) -> Result<(), StoreError> { - self.inner().snap_state.state_download_checkpoint = None; - Ok(()) - } - - fn set_state_heal_pending(&self, paths: Vec) -> Result<(), StoreError> { - self.inner().snap_state.state_heal_pending = Some(paths); - Ok(()) - } - - fn get_state_heal_pending(&self) -> Result>, StoreError> { - Ok(self.inner().snap_state.state_heal_pending.clone()) - } - - fn clear_state_heal_pending(&self) -> Result<(), StoreError> { - self.inner().snap_state.state_heal_pending = None; - Ok(()) - } - - fn set_storage_heal_pending(&self, paths: Vec<(H256, Nibbles)>) -> Result<(), StoreError> { - self.inner().snap_state.storage_heal_pending = Some(paths); - Ok(()) - } - - fn get_storage_heal_pending(&self) -> Result>, StoreError> { - Ok(self.inner().snap_state.storage_heal_pending.clone()) - } - - fn clear_storage_heal_pending(&self) -> Result<(), StoreError> { - self.inner().snap_state.storage_heal_pending = None; + fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { + self.inner().snap_state.state_trie_download_checkpoint = None; Ok(()) } } diff --git a/crates/storage/store/engines/libmdbx.rs 
b/crates/storage/store/engines/libmdbx.rs index c6274d8c6e..46f7e3f1d5 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ b/crates/storage/store/engines/libmdbx.rs @@ -14,7 +14,7 @@ use ethrex_core::types::{ }; use ethrex_rlp::decode::RLPDecode; use ethrex_rlp::encode::RLPEncode; -use ethrex_trie::{LibmdbxDupsortTrieDB, LibmdbxTrieDB, Nibbles, Trie}; +use ethrex_trie::{LibmdbxDupsortTrieDB, LibmdbxTrieDB, Trie}; use libmdbx::orm::{Decodable, Encodable, Table}; use libmdbx::{ dupsort, @@ -544,59 +544,23 @@ impl StoreEngine for Store { self.delete::(SnapStateIndex::HeaderDownloadCheckpoint) } - fn set_state_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { self.write::( SnapStateIndex::StateTrieDownloadCheckpoint, (current_root, last_key).encode_to_vec(), ) } - fn get_state_download_checkpoint(&self) -> Result, StoreError> { + fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { self.read::(SnapStateIndex::StateTrieDownloadCheckpoint)? .map(|ref h| <(H256, H256)>::decode(h)) .transpose() .map_err(StoreError::RLPDecode) } - fn clear_state_download_checkpoint(&self) -> Result<(), StoreError> { + fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { self.delete::(SnapStateIndex::StateTrieDownloadCheckpoint) } - - fn set_state_heal_pending(&self, paths: Vec) -> Result<(), StoreError> { - self.write::( - SnapStateIndex::StateTrieHealPending, - paths.encode_to_vec(), - ) - } - - fn get_state_heal_pending(&self) -> Result>, StoreError> { - self.read::(SnapStateIndex::StateTrieHealPending)? - .map(|ref h| >::decode(h)) - .transpose() - .map_err(StoreError::RLPDecode) - } - - fn clear_state_heal_pending(&self) -> Result<(), StoreError> { - self.delete::(SnapStateIndex::StateTrieHealPending) - } - - fn set_storage_heal_pending(&self, paths: Vec<(H256, Nibbles)>) -> Result<(), StoreError> { - self.write::( - SnapStateIndex::StorageTrieHealPending, - paths.encode_to_vec(), - ) - } - - fn get_storage_heal_pending(&self) -> Result>, StoreError> { - self.read::(SnapStateIndex::StorageTrieHealPending)? 
- .map(|ref h| >::decode(h)) - .transpose() - .map_err(StoreError::RLPDecode) - } - - fn clear_storage_heal_pending(&self) -> Result<(), StoreError> { - self.delete::(SnapStateIndex::StorageTrieHealPending) - } } impl Debug for Store { diff --git a/crates/storage/store/engines/utils.rs b/crates/storage/store/engines/utils.rs index 6ee749ff6f..87cee53b9b 100644 --- a/crates/storage/store/engines/utils.rs +++ b/crates/storage/store/engines/utils.rs @@ -44,10 +44,6 @@ pub enum SnapStateIndex { HeaderDownloadCheckpoint = 0, // Current root hash of the latest State Trie + the last downloaded key StateTrieDownloadCheckpoint = 1, - // State trie paths that were left in the healing queue of a previous sync cycle - StateTrieHealPending = 2, - // Storage trie paths that were left in the healing queue of a previous sync cycle - StorageTrieHealPending = 3, } impl From for SnapStateIndex { @@ -59,12 +55,6 @@ impl From for SnapStateIndex { x if x == SnapStateIndex::StateTrieDownloadCheckpoint as u8 => { SnapStateIndex::StateTrieDownloadCheckpoint } - x if x == SnapStateIndex::StateTrieHealPending as u8 => { - SnapStateIndex::StateTrieHealPending - } - x if x == SnapStateIndex::StorageTrieHealPending as u8 => { - SnapStateIndex::StorageTrieHealPending - } _ => panic!("Invalid value when casting to SnapDataIndex: {}", value), } } diff --git a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs index c240351af8..1f9c050a44 100644 --- a/crates/storage/store/storage.rs +++ b/crates/storage/store/storage.rs @@ -1012,16 +1012,16 @@ impl Store { self.engine.clear_header_download_checkpoint() } - pub fn set_state_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { - self.engine.set_state_download_checkpoint(current_root, last_key) + pub fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + self.engine.set_state_trie_download_checkpoint(current_root, last_key) } - pub fn get_state_download_checkpoint(&self) -> Result, StoreError> { - self.engine.get_state_download_checkpoint() + pub fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { + self.engine.get_state_trie_download_checkpoint() } - pub fn clear_state_download_checkpoint(&self) -> Result<(), StoreError> { - self.engine.clear_state_download_checkpoint() + pub fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { + self.engine.clear_state_trie_download_checkpoint() } } From 6c40cc5154bfcc7daee06eaf62b1c602ab21c124 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 18:45:00 -0300 Subject: [PATCH 120/189] Allow healing restart --- crates/networking/p2p/sync.rs | 72 ++++++++++++++++------- crates/storage/store/engines/api.rs | 6 ++ crates/storage/store/engines/in_memory.rs | 19 +++++- crates/storage/store/engines/libmdbx.rs | 18 ++++++ crates/storage/store/engines/utils.rs | 5 ++ crates/storage/store/storage.rs | 12 ++++ 6 files changed, 111 insertions(+), 21 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 2f0e534fb7..e16f1ccfb3 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -404,12 +404,21 @@ async fn rebuild_state_trie( if retry_count > MAX_RETRIES { // Store current checkpoint store.set_state_trie_download_checkpoint(current_state_root, start_account_hash)?; - return Err(SyncError::StalePivot); } - info!("Account Trie fully fetched, signaling storage fetcher process"); + info!("Account Trie Fetching ended, signaling 
storage fetcher process"); // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; - storage_fetcher_handle.await??; + let pending_storage_accounts = storage_fetcher_handle.await??; + let pending_storages = pending_storage_accounts.is_empty(); + // Next cycle may have different storage roots for these accounts so we will leave them to healing + if pending_storages { + // (Assumption) If we are still fetching storages then we should have never started healing and have no pending healing accounts to overwrite + store.set_pending_storage_heal_accounts(pending_storage_accounts)?; + } + if retry_count > MAX_RETRIES || pending_storages { + // Skip healing and return stale status + return Ok(false) + } // Perform state healing to fix inconsistencies with older state info!("Healing"); let res = heal_state_trie(bytecode_sender.clone(), state_root, store.clone(), peers.clone()).await?; @@ -481,18 +490,19 @@ async fn fetch_bytecode_batch( /// Waits for incoming account hashes & storage roots from the receiver channel endpoint, queues them, and fetches and stores their bytecodes in batches /// This function will remain active until either an empty vec is sent to the receiver or the pivot becomes stale -/// In the last case, the fetcher will return an internal SyncError::StalePivot error +/// In the last case, the fetcher will return the account hashes of the accounts in the queue async fn storage_fetcher( mut receiver: Receiver>, peers: Arc>, store: Store, state_root: H256, -) -> Result<(), SyncError> { +) -> Result, SyncError> { const BATCH_SIZE: usize = 100; // Pending list of storages to fetch let mut pending_storage: Vec<(H256, H256)> = vec![]; - // TODO: Also add a queue for storages that were incompletely fecthed, - // but for the first iteration we will asume not fully fetched -> fetch again + // The pivot may become stale while the fetcher is active, we will still keep the process + // alive until the end signal so we don't lose queued messages + let mut stale = false; let mut incoming = true; while incoming { // Fetch incoming requests @@ -519,14 +529,19 @@ async fn storage_fetcher( info!("Processing current batches"); // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process - while pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty() { + // If the pivot became stale don't process anything and just save incoming requests + while !stale && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) { let now = Instant::now(); let next_batch = pending_storage .drain(..BATCH_SIZE.min(pending_storage.len())) .collect::>(); let batch_size = next_batch.len(); let remaining = - fetch_storage_batch(next_batch, state_root, peers.clone(), store.clone()).await?; + match fetch_storage_batch(next_batch.clone(), state_root, peers.clone(), store.clone()).await { + Ok(r) => r, + Err(SyncError::StalePivot) => {stale = true; next_batch}, + Err(err) => return Err(err), + }; let remaining_size = remaining.len(); // Add unfeched bytecodes back to the queue pending_storage.extend(remaining); @@ -539,7 +554,7 @@ async fn storage_fetcher( } info!("Finished processing current batches"); } - Ok(()) + Ok(pending_storage.into_iter().map(|(acc, _)| acc).collect()) } /// Receives a batch of account hashes with their storage roots, fetches their respective storage ranges via p2p and returns a list of the code hashes that couldn't be fetched in the request (if 
applicable) @@ -681,6 +696,10 @@ async fn heal_state_trie( peers.clone(), store.clone(), )); + // Check if we have pending storages to heal from a previous cycle + if let Some(pending) = store.get_pending_storage_heal_accounts()? { + storage_sender.send(pending).await?; + } // Begin by requesting the root node let mut paths = vec![Nibbles::default()]; // Count the number of request retries so we don't get stuck requesting old state @@ -742,21 +761,30 @@ async fn heal_state_trie( } // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; - storage_healer_handler.await??; - Ok(retry_count < MAX_RETRIES) + let pending_storage_heal_accounts = storage_healer_handler.await??; + // Update pending list + let storage_healing_succesful = pending_storage_heal_accounts.is_empty(); + if !storage_healing_succesful { + store.set_pending_storage_heal_accounts(pending_storage_heal_accounts)?; + } + Ok(retry_count < MAX_RETRIES && storage_healing_succesful) } /// Waits for incoming hashed addresses from the receiver channel endpoint and queues the associated root nodes for state retrieval /// Also retrieves their children nodes until we have the full storage trie stored +/// If the state becomes stale while fetching, returns its current queued account hashes async fn storage_healer( state_root: H256, mut receiver: Receiver>, peers: Arc>, store: Store, -) -> Result<(), SyncError> { +) -> Result, SyncError> { const BATCH_SIZE: usize = 200; - // Pending list of bytecodes to fetch + // Pending list of storages to fetch let mut pending_storages: Vec<(H256, Nibbles)> = vec![]; + // The pivot may become stale while the fetcher is active, we will still keep the process + // alive until the end signal so we don't lose queued messages + let mut stale = false; let mut incoming = true; while incoming { // Fetch incoming requests @@ -774,7 +802,8 @@ async fn storage_healer( } // If we have enough pending storages to fill a batch // or if we have no more incoming batches, spawn a fetch process - while pending_storages.len() >= BATCH_SIZE || !incoming && !pending_storages.is_empty() { + // If the pivot became stale don't process anything and just save incoming requests + while !stale && (pending_storages.len() >= BATCH_SIZE || !incoming && !pending_storages.is_empty()) { let mut next_batch: BTreeMap> = BTreeMap::new(); // Group pending storages by account path // We do this here instead of keeping them sorted so we don't prioritize further nodes from the first tries @@ -783,7 +812,11 @@ async fn storage_healer( next_batch.entry(account).or_default().push(path); } let return_batch = - heal_storage_batch(state_root, next_batch, peers.clone(), store.clone()).await?; + match heal_storage_batch(state_root, next_batch.clone(), peers.clone(), store.clone()).await { + Ok(b) => b, + Err(SyncError::StalePivot) => {stale = true; next_batch}, + Err(err) => return Err(err), + }; for (acc_path, paths) in return_batch { for path in paths { pending_storages.push((acc_path, path)); @@ -791,7 +824,7 @@ async fn storage_healer( } } } - Ok(()) + Ok(pending_storages.into_iter().map(|(h, _)| h).collect()) } /// Receives a set of storage trie paths (grouped by their corresponding account's state trie path), @@ -832,9 +865,8 @@ async fn heal_storage_batch( return Ok(batch); } } - // This is a corner case where we fetched an account range for a block but the chain has moved on and the block - // was dropped by the peer's snapshot. 
We will keep the fetcher alive to avoid errors and stop fetching as from the next account - Ok(BTreeMap::new()) + // Pivot became stale, lets inform the fetcher + Err(SyncError::StalePivot) } /// Returns the partial paths to the node's children if they are not already part of the trie state diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index a781ff2035..53299b7e7c 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -264,4 +264,10 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError>; + fn set_pending_storage_heal_accounts(&self, accounts: Vec) -> Result<(), StoreError>; + + fn get_pending_storage_heal_accounts(&self) -> Result>, StoreError>; + + fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError>; + } diff --git a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index 279c7b0b73..51d0216a82 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -60,7 +60,9 @@ pub struct SnapState { /// Latest downloaded block header's hash from a previously aborted sync header_download_checkpoint: Option, /// Current root hash of the latest State Trie + the last downloaded key - state_trie_download_checkpoint: Option<(H256, H256)> + state_trie_download_checkpoint: Option<(H256, H256)>, + /// Accounts which storage needs healing + pending_storage_heal_accounts: Option>, } impl Store { @@ -462,6 +464,21 @@ impl StoreEngine for Store { self.inner().snap_state.state_trie_download_checkpoint = None; Ok(()) } + + fn set_pending_storage_heal_accounts(&self, accounts: Vec) -> Result<(), StoreError> { + self.inner().snap_state.pending_storage_heal_accounts = Some(accounts); + Ok(()) + } + + fn get_pending_storage_heal_accounts(&self) -> Result>, StoreError> { + Ok(self.inner().snap_state.pending_storage_heal_accounts.clone()) + } + + fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> { + self.inner().snap_state.pending_storage_heal_accounts = None; + Ok(()) + } + } impl Debug for Store { diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs index 46f7e3f1d5..a3b8d99294 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ b/crates/storage/store/engines/libmdbx.rs @@ -561,6 +561,24 @@ impl StoreEngine for Store { fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { self.delete::(SnapStateIndex::StateTrieDownloadCheckpoint) } + + fn set_pending_storage_heal_accounts(&self, accounts: Vec) -> Result<(), StoreError> { + self.write::( + SnapStateIndex::PendingStorageHealAccounts, + accounts.encode_to_vec(), + ) + } + + fn get_pending_storage_heal_accounts(&self) -> Result>, StoreError> { + self.read::(SnapStateIndex::PendingStorageHealAccounts)? 
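+ // Same read-then-decode shape as the checkpoint getters: the setter above
+ // stored the account list as an RLP-encoded Vec<H256>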
+ .map(|ref h| >::decode(h)) + .transpose() + .map_err(StoreError::RLPDecode) + } + + fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> { + self.delete::(SnapStateIndex::PendingStorageHealAccounts) + } } impl Debug for Store { diff --git a/crates/storage/store/engines/utils.rs b/crates/storage/store/engines/utils.rs index 87cee53b9b..b4c3820b3e 100644 --- a/crates/storage/store/engines/utils.rs +++ b/crates/storage/store/engines/utils.rs @@ -44,6 +44,8 @@ pub enum SnapStateIndex { HeaderDownloadCheckpoint = 0, // Current root hash of the latest State Trie + the last downloaded key StateTrieDownloadCheckpoint = 1, + // Accounts which storage needs healing + PendingStorageHealAccounts = 2, } impl From for SnapStateIndex { @@ -55,6 +57,9 @@ impl From for SnapStateIndex { x if x == SnapStateIndex::StateTrieDownloadCheckpoint as u8 => { SnapStateIndex::StateTrieDownloadCheckpoint } + x if x == SnapStateIndex::PendingStorageHealAccounts as u8 => { + SnapStateIndex::PendingStorageHealAccounts + } _ => panic!("Invalid value when casting to SnapDataIndex: {}", value), } } diff --git a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs index 1f9c050a44..8e8d6df354 100644 --- a/crates/storage/store/storage.rs +++ b/crates/storage/store/storage.rs @@ -1023,6 +1023,18 @@ impl Store { pub fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { self.engine.clear_state_trie_download_checkpoint() } + + pub fn set_pending_storage_heal_accounts(&self, accounts: Vec) -> Result<(), StoreError> { + self.engine.set_pending_storage_heal_accounts(accounts) + } + + pub fn get_pending_storage_heal_accounts(&self) -> Result>, StoreError> { + self.engine.get_pending_storage_heal_accounts() + } + + pub fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> { + self.engine.clear_pending_storage_heal_accounts() + } } pub fn hash_address(address: &Address) -> Vec { From 85de4364b1ab171abe3e0f6855d952e14d240689 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 18:50:39 -0300 Subject: [PATCH 121/189] Allow healing restart --- crates/networking/p2p/sync.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index e16f1ccfb3..8041da6240 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -554,6 +554,7 @@ async fn storage_fetcher( } info!("Finished processing current batches"); } + info!("Concluding storage fetcher, {} storages left in queue to be healed later", pending_storage.len()); Ok(pending_storage.into_iter().map(|(acc, _)| acc).collect()) } @@ -698,6 +699,7 @@ async fn heal_state_trie( )); // Check if we have pending storages to heal from a previous cycle if let Some(pending) = store.get_pending_storage_heal_accounts()? 
{ + info!("Retrieved {} pending storage healing requests", pending.len()); storage_sender.send(pending).await?; } // Begin by requesting the root node @@ -759,12 +761,14 @@ async fn heal_state_trie( retry_count += 1; } } + info!("State Healing stopped, signaling storage healer"); // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; let pending_storage_heal_accounts = storage_healer_handler.await??; // Update pending list let storage_healing_succesful = pending_storage_heal_accounts.is_empty(); if !storage_healing_succesful { + info!("{} storages with pending healing", pending_storage_heal_accounts.len()); store.set_pending_storage_heal_accounts(pending_storage_heal_accounts)?; } Ok(retry_count < MAX_RETRIES && storage_healing_succesful) @@ -796,6 +800,11 @@ async fn storage_healer( .into_iter() .map(|acc_path| (acc_path, Nibbles::default())), ); + info!( + "Received incoming storage heal request, current batch: {}/{BATCH_SIZE}", + pending_storages.len() + ); + info!("Number of messages in receiver: {}", receiver.len()); } // Disconnect / Empty message signaling no more bytecodes to sync _ => incoming = false, From ad227120c287f6cf418560b0c025f7c025d93ca5 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 18:51:24 -0300 Subject: [PATCH 122/189] Allow healing restart --- crates/networking/p2p/sync.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 8041da6240..7d4f568cf3 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -201,6 +201,8 @@ impl SyncManager { self.last_snap_pivot = pivot_header.number; // Finished a sync cycle without aborting halfway, clear current checkpoint store.clear_header_download_checkpoint()?; + store.clear_state_trie_download_checkpoint()?; + store.clear_pending_storage_heal_accounts()?; // Next sync will be full-sync self.sync_mode = SyncMode::Full; } From f999957707be1abae9a3bd503b8b97c4fa96b5bf Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 19:00:34 -0300 Subject: [PATCH 123/189] Fix --- crates/networking/p2p/sync.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 7d4f568cf3..eb892aa50f 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -587,7 +587,7 @@ async fn fetch_storage_batch( // Handle incomplete ranges if incomplete { // An incomplete range cannot be empty - let (keys, values) = (keys.pop().unwrap(), values.pop().unwrap()); + let (last_keys, last_values) = (keys.pop().unwrap(), values.pop().unwrap()); // If only one incomplete range is returned then it must belong to a trie that is too big to fit into one request // We will handle this large trie separately if keys.len() == 1 { @@ -597,8 +597,8 @@ async fn fetch_storage_batch( state_root, account_hash, storage_root, - keys, - values, + last_keys, + last_values, peers.clone(), store.clone(), ) From dc1466385ac329603440210be2176871de0194c8 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 16 Jan 2025 19:04:55 -0300 Subject: [PATCH 124/189] Fix --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index eb892aa50f..5f7497eafe 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -590,7 +590,7 @@ async fn fetch_storage_batch( let (last_keys, last_values) = (keys.pop().unwrap(), 
values.pop().unwrap()); // If only one incomplete range is returned then it must belong to a trie that is too big to fit into one request // We will handle this large trie separately - if keys.len() == 1 { + if keys.is_empty() { info!("Large storage trie encountered, handling separately"); let (account_hash, storage_root) = batch.remove(0); handle_large_storage_range( From 2f9bfaee69f40f4183c1f8352f92a96f2e34ca25 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 12:48:00 -0300 Subject: [PATCH 125/189] Healing: Dont write nodes only leaf values, may be slower but we wont end up with inconsistencies upon restarts --- crates/networking/p2p/sync.rs | 88 +++++++++++++---------- crates/storage/store/engines/api.rs | 12 +++- crates/storage/store/engines/in_memory.rs | 32 ++++++--- crates/storage/store/engines/libmdbx.rs | 34 ++++++--- crates/storage/store/engines/utils.rs | 13 ++-- crates/storage/store/storage.rs | 23 +++--- 6 files changed, 130 insertions(+), 72 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 5f7497eafe..414f96af6e 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -180,7 +180,7 @@ impl SyncManager { store.clone(), )); let stale_pivot = - !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone(), store.get_state_trie_download_checkpoint()?) + !rebuild_state_trie(pivot_header.state_root, self.peers.clone(), store.clone()) .await?; if stale_pivot { warn!("Stale pivot, aborting sync"); @@ -200,9 +200,7 @@ impl SyncManager { } self.last_snap_pivot = pivot_header.number; // Finished a sync cycle without aborting halfway, clear current checkpoint - store.clear_header_download_checkpoint()?; - store.clear_state_trie_download_checkpoint()?; - store.clear_pending_storage_heal_accounts()?; + store.clear_snap_state()?; // Next sync will be full-sync self.sync_mode = SyncMode::Full; } @@ -316,7 +314,6 @@ async fn rebuild_state_trie( state_root: H256, peers: Arc>, store: Store, - checkpoint: Option<(H256, H256)>, ) -> Result { info!("Rebuilding State Trie"); // Spawn storage & bytecode fetchers @@ -335,7 +332,8 @@ async fn rebuild_state_trie( )); // Resume download from checkpoint if available or start from an empty trie // We cannot keep an open trie here so we will track the root between lookups - let (mut current_state_root, mut start_account_hash) = checkpoint.unwrap_or((*EMPTY_TRIE_HASH, H256::zero())); + let mut current_state_root = store.get_state_trie_root_checkpoint()?.unwrap_or(*EMPTY_TRIE_HASH); + let mut start_account_hash = store.get_state_trie_key_checkpoint()?.unwrap_or_default(); info!("Starting/Resuming state trie download from key {start_account_hash}"); // Fetch Account Ranges // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available @@ -405,7 +403,8 @@ async fn rebuild_state_trie( } if retry_count > MAX_RETRIES { // Store current checkpoint - store.set_state_trie_download_checkpoint(current_state_root, start_account_hash)?; + store.set_state_trie_root_checkpoint(current_state_root)?; + store.set_state_trie_key_checkpoint(start_account_hash)?; } info!("Account Trie Fetching ended, signaling storage fetcher process"); // Send empty batch to signal that no more batches are incoming @@ -423,7 +422,7 @@ async fn rebuild_state_trie( } // Perform state healing to fix inconsistencies with older state info!("Healing"); - let res = heal_state_trie(bytecode_sender.clone(), state_root, store.clone(), 
peers.clone()).await?; + let res = heal_state_trie(bytecode_sender.clone(), state_root, current_state_root, store.clone(), peers.clone()).await?; // Send empty batch to signal that no more batches are incoming info!("Account Trie fully rebuilt, signaling bytecode fetcher process"); bytecode_sender.send(vec![]).await?; @@ -685,9 +684,11 @@ async fn handle_large_storage_range( } /// Heals the trie given its state_root by fetching any missing nodes in it via p2p +/// Doesn't store nodes, only leaf values to avoid inconsistent tries on restarts async fn heal_state_trie( bytecode_sender: Sender>, state_root: H256, + mut current_root: H256, store: Store, peers: Arc>, ) -> Result { @@ -721,13 +722,12 @@ async fn heal_state_trie( // For each fetched node: // - Add its children to the queue (if we don't have them already) // - If it is a leaf, request its bytecode & storage - // - Add it to the trie's state + // - If it is a leaf, add its path & value to the trie + // We cannot keep the trie state open + let mut trie = store.open_state_trie(current_root); for node in nodes { let path = paths.remove(0); - // We cannot keep the trie state open - let mut trie = store.open_state_trie(*EMPTY_TRIE_HASH); - let trie_state = trie.state_mut(); - paths.extend(node_missing_children(&node, &path, trie_state)?); + paths.extend(node_missing_children(&node, &path, trie.state())?); if let Node::Leaf(node) = &node { // Fetch bytecode & storage let account = AccountState::decode(&node.value)?; @@ -748,9 +748,11 @@ async fn heal_state_trie( { code_hashes.push(account.code_hash); } + // Write values to trie + trie.insert(account_hash.0.to_vec(), account.encode_to_vec())?; } - let hash = node.compute_hash(); - trie_state.write_node(node, hash)?; + // Update current root + current_root = trie.hash()?; } // Send storage & bytecode requests if !hahsed_addresses.is_empty() { @@ -768,6 +770,7 @@ async fn heal_state_trie( storage_sender.send(vec![]).await?; let pending_storage_heal_accounts = storage_healer_handler.await??; // Update pending list + // If a storage trie was left mid-healing we will heal it again let storage_healing_succesful = pending_storage_heal_accounts.is_empty(); if !storage_healing_succesful { info!("{} storages with pending healing", pending_storage_heal_accounts.len()); @@ -787,7 +790,9 @@ async fn storage_healer( ) -> Result, SyncError> { const BATCH_SIZE: usize = 200; // Pending list of storages to fetch - let mut pending_storages: Vec<(H256, Nibbles)> = vec![]; + // Each entry is made up of AccountHash -> (CurrentRoot, Paths) + let mut pending_storages: BTreeMap)> = BTreeMap::new(); + //let mut pending_storages: Vec<(H256, Nibbles)> = vec![]; // The pivot may become stale while the fetcher is active, we will still keep the process // alive until the end signal so we don't lose queued messages let mut stale = false; @@ -800,7 +805,7 @@ async fn storage_healer( pending_storages.extend( account_paths .into_iter() - .map(|acc_path| (acc_path, Nibbles::default())), + .map(|acc_path| (acc_path, (*EMPTY_TRIE_HASH, vec![Nibbles::default()]))), ); info!( "Received incoming storage heal request, current batch: {}/{BATCH_SIZE}", @@ -814,13 +819,14 @@ async fn storage_healer( // If we have enough pending storages to fill a batch // or if we have no more incoming batches, spawn a fetch process // If the pivot became stale don't process anything and just save incoming requests - while !stale && (pending_storages.len() >= BATCH_SIZE || !incoming && !pending_storages.is_empty()) { - let mut next_batch: BTreeMap> = 
BTreeMap::new(); - // Group pending storages by account path - // We do this here instead of keeping them sorted so we don't prioritize further nodes from the first tries - for (account, path) in pending_storages.drain(..BATCH_SIZE.min(pending_storages.len())) - { - next_batch.entry(account).or_default().push(path); + while !stale && !pending_storages.is_empty() { + let mut next_batch: BTreeMap)> = BTreeMap::new(); + // Fill batch + let mut batch_size = 0; + while batch_size < BATCH_SIZE { + let (key, val) = pending_storages.pop_first().unwrap(); + batch_size += val.1.len(); + next_batch.insert(key, val); } let return_batch = match heal_storage_batch(state_root, next_batch.clone(), peers.clone(), store.clone()).await { @@ -828,11 +834,7 @@ async fn storage_healer( Err(SyncError::StalePivot) => {stale = true; next_batch}, Err(err) => return Err(err), }; - for (acc_path, paths) in return_batch { - for path in paths { - pending_storages.push((acc_path, path)); - } - } + pending_storages.extend(return_batch.into_iter()); } } Ok(pending_storages.into_iter().map(|(h, _)| h).collect()) @@ -842,31 +844,39 @@ async fn storage_healer( /// fetches their respective nodes, stores them, and returns their children paths and the paths that couldn't be fetched so they can be returned to the queue async fn heal_storage_batch( state_root: H256, - mut batch: BTreeMap>, + mut batch: BTreeMap)>, peers: Arc>, store: Store, -) -> Result>, SyncError> { +) -> Result)>, SyncError> { for _ in 0..MAX_RETRIES { let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; + let req_batch = batch.iter().map(|(k, v)| (*k, v.1.clone())).collect(); if let Some(mut nodes) = peer - .request_storage_trienodes(state_root, batch.clone()) + .request_storage_trienodes(state_root, req_batch) .await { info!("Received {} nodes", nodes.len()); // Process the nodes for each account path - for (acc_path, paths) in batch.iter_mut() { - let mut trie = store.open_storage_trie(*acc_path, *EMPTY_TRIE_HASH); - let trie_state = trie.state_mut(); + for (acc_path, (root, paths)) in batch.iter_mut() { + let mut trie = store.open_storage_trie(*acc_path, *root); // Get the corresponding nodes for node in nodes.drain(..paths.len().min(nodes.len())) { let path = paths.remove(0); // Add children to batch - let children = node_missing_children(&node, &path, trie_state)?; + let children = node_missing_children(&node, &path, trie.state())?; paths.extend(children); - // Add node to the state - let hash = node.compute_hash(); - trie_state.write_node(node, hash)?; + // If it is a leaf node, insert values into the trie + if let Node::Leaf(leaf) = node { + let path = &path.concat(leaf.partial.clone()).to_bytes(); + if path.len() != 32 { + // Something went wrong + return Err(SyncError::CorruptPath); + } + trie.insert(path.to_vec(), leaf.value.encode_to_vec())?; + } } + // Update current root + *root = trie.hash()?; // Cut the loop if we ran out of nodes if nodes.is_empty() { break; diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index 53299b7e7c..06d0a875cc 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -258,11 +258,17 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { fn clear_header_download_checkpoint(&self) -> Result<(), StoreError>; - fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError>; + fn set_state_trie_root_checkpoint(&self, current_root: H256) -> Result<(), StoreError>; - fn 
get_state_trie_download_checkpoint(&self) -> Result, StoreError>; + fn get_state_trie_root_checkpoint(&self) -> Result, StoreError>; - fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError>; + fn clear_state_trie_root_checkpoint(&self) -> Result<(), StoreError>; + + fn set_state_trie_key_checkpoint(&self, last_key: H256) -> Result<(), StoreError>; + + fn get_state_trie_key_checkpoint(&self) -> Result, StoreError>; + + fn clear_state_trie_key_checkpoint(&self) -> Result<(), StoreError>; fn set_pending_storage_heal_accounts(&self, accounts: Vec) -> Result<(), StoreError>; diff --git a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index 51d0216a82..30bd8ec76d 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -59,8 +59,10 @@ struct ChainData { pub struct SnapState { /// Latest downloaded block header's hash from a previously aborted sync header_download_checkpoint: Option, - /// Current root hash of the latest State Trie + the last downloaded key - state_trie_download_checkpoint: Option<(H256, H256)>, + /// Current root hash of the latest State Trie (Used for both fetching and healing) + state_trie_root_checkpoint: Option, + /// Last downloaded key of the latest State Trie + state_trie_key_checkpoint: Option, /// Accounts which storage needs healing pending_storage_heal_accounts: Option>, } @@ -451,17 +453,31 @@ impl StoreEngine for Store { Ok(()) } - fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { - self.inner().snap_state.state_trie_download_checkpoint = Some((current_root, last_key)); + fn set_state_trie_root_checkpoint(&self, current_root: H256) -> Result<(), StoreError> { + self.inner().snap_state.state_trie_root_checkpoint = Some(current_root); Ok(()) } - fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { - Ok(self.inner().snap_state.state_trie_download_checkpoint) + fn get_state_trie_root_checkpoint(&self) -> Result, StoreError> { + Ok(self.inner().snap_state.state_trie_root_checkpoint) } - fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { - self.inner().snap_state.state_trie_download_checkpoint = None; + fn clear_state_trie_root_checkpoint(&self) -> Result<(), StoreError> { + self.inner().snap_state.state_trie_root_checkpoint = None; + Ok(()) + } + + fn set_state_trie_key_checkpoint(&self, last_key: H256) -> Result<(), StoreError> { + self.inner().snap_state.state_trie_key_checkpoint = Some(last_key); + Ok(()) + } + + fn get_state_trie_key_checkpoint(&self) -> Result, StoreError> { + Ok(self.inner().snap_state.state_trie_key_checkpoint) + } + + fn clear_state_trie_key_checkpoint(&self) -> Result<(), StoreError> { + self.inner().snap_state.state_trie_key_checkpoint = None; Ok(()) } diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs index a3b8d99294..8ee077b58b 100644 --- a/crates/storage/store/engines/libmdbx.rs +++ b/crates/storage/store/engines/libmdbx.rs @@ -544,22 +544,40 @@ impl StoreEngine for Store { self.delete::(SnapStateIndex::HeaderDownloadCheckpoint) } - fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> { + fn set_state_trie_root_checkpoint(&self, current_root: H256) -> Result<(), StoreError> { self.write::( - SnapStateIndex::StateTrieDownloadCheckpoint, - (current_root, last_key).encode_to_vec(), + SnapStateIndex::StateTrieRootCheckpoint, + 
current_root.encode_to_vec(),
 )
 }

- fn get_state_trie_download_checkpoint(&self) -> Result<Option<(H256, H256)>, StoreError> {
- self.read::<SnapState>(SnapStateIndex::StateTrieDownloadCheckpoint)?
- .map(|ref h| <(H256, H256)>::decode(h))
+ fn get_state_trie_root_checkpoint(&self) -> Result<Option<H256>, StoreError> {
+ self.read::<SnapState>(SnapStateIndex::StateTrieRootCheckpoint)?
+ .map(|ref h| H256::decode(h))
 .transpose()
 .map_err(StoreError::RLPDecode)
 }

- fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> {
- self.delete::<SnapState>(SnapStateIndex::StateTrieDownloadCheckpoint)
+ fn clear_state_trie_root_checkpoint(&self) -> Result<(), StoreError> {
+ self.delete::<SnapState>(SnapStateIndex::StateTrieRootCheckpoint)
+ }
+
+ fn set_state_trie_key_checkpoint(&self, last_key: H256) -> Result<(), StoreError> {
+ self.write::<SnapState>(
+ SnapStateIndex::StateTrieKeyCheckpoint,
+ last_key.encode_to_vec(),
+ )
+ }
+
+ fn get_state_trie_key_checkpoint(&self) -> Result<Option<H256>, StoreError> {
+ self.read::<SnapState>(SnapStateIndex::StateTrieKeyCheckpoint)?
+ .map(|ref h| H256::decode(h))
+ .transpose()
+ .map_err(StoreError::RLPDecode)
+ }
+
+ fn clear_state_trie_key_checkpoint(&self) -> Result<(), StoreError> {
+ self.delete::<SnapState>(SnapStateIndex::StateTrieKeyCheckpoint)
 }

 fn set_pending_storage_heal_accounts(&self, accounts: Vec<H256>) -> Result<(), StoreError> {
diff --git a/crates/storage/store/engines/utils.rs b/crates/storage/store/engines/utils.rs
index b4c3820b3e..7163a3ec9e 100644
--- a/crates/storage/store/engines/utils.rs
+++ b/crates/storage/store/engines/utils.rs
@@ -42,10 +42,12 @@ impl From<u8> for ChainDataIndex {
 pub enum SnapStateIndex {
 // Hash of the last downloaded header in a previous sync cycle that was aborted
 HeaderDownloadCheckpoint = 0,
- // Current root hash of the latest State Trie + the last downloaded key
- StateTrieDownloadCheckpoint = 1,
+ // Current root hash of the latest State Trie (Used for both fetch & heal)
+ StateTrieRootCheckpoint = 1,
 // Accounts which storage needs healing
 PendingStorageHealAccounts = 2,
+ // Last key fetched from the state trie
+ StateTrieKeyCheckpoint = 3,
 }

 impl From<u8> for SnapStateIndex {
@@ -54,12 +56,15 @@ impl From<u8> for SnapStateIndex {
 x if x == SnapStateIndex::HeaderDownloadCheckpoint as u8 => {
 SnapStateIndex::HeaderDownloadCheckpoint
 }
- x if x == SnapStateIndex::StateTrieDownloadCheckpoint as u8 => {
- SnapStateIndex::StateTrieDownloadCheckpoint
+ x if x == SnapStateIndex::StateTrieRootCheckpoint as u8 => {
+ SnapStateIndex::StateTrieRootCheckpoint
 }
 x if x == SnapStateIndex::PendingStorageHealAccounts as u8 => {
 SnapStateIndex::PendingStorageHealAccounts
 }
+ x if x == SnapStateIndex::StateTrieKeyCheckpoint as u8 => {
+ SnapStateIndex::StateTrieKeyCheckpoint
+ }
 _ => panic!("Invalid value when casting to SnapDataIndex: {}", value),
 }
 }
diff --git a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs
index 8e8d6df354..e4dafb9901 100644
--- a/crates/storage/store/storage.rs
+++ b/crates/storage/store/storage.rs
@@ -1008,20 +1008,20 @@ impl Store {
 self.engine.get_header_download_checkpoint()
 }

- pub fn clear_header_download_checkpoint(&self) -> Result<(), StoreError> {
- self.engine.clear_header_download_checkpoint()
+ pub fn set_state_trie_root_checkpoint(&self, current_root: H256) -> Result<(), StoreError> {
+ self.engine.set_state_trie_root_checkpoint(current_root)
 }

- pub fn set_state_trie_download_checkpoint(&self, current_root: H256, last_key: H256) -> Result<(), StoreError> {
- self.engine.set_state_trie_download_checkpoint(current_root, last_key)
+ pub fn get_state_trie_root_checkpoint(&self) ->
Result, StoreError> { + self.engine.get_state_trie_root_checkpoint() } - pub fn get_state_trie_download_checkpoint(&self) -> Result, StoreError> { - self.engine.get_state_trie_download_checkpoint() + pub fn set_state_trie_key_checkpoint(&self, last_key: H256) -> Result<(), StoreError> { + self.engine.set_state_trie_key_checkpoint(last_key) } - pub fn clear_state_trie_download_checkpoint(&self) -> Result<(), StoreError> { - self.engine.clear_state_trie_download_checkpoint() + pub fn get_state_trie_key_checkpoint(&self) -> Result, StoreError> { + self.engine.get_state_trie_key_checkpoint() } pub fn set_pending_storage_heal_accounts(&self, accounts: Vec) -> Result<(), StoreError> { @@ -1032,8 +1032,11 @@ impl Store { self.engine.get_pending_storage_heal_accounts() } - pub fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> { - self.engine.clear_pending_storage_heal_accounts() + pub fn clear_snap_state(&self) -> Result<(), StoreError> { + //self.engine.clear_header_download_checkpoint()?; TODO: Uncomment + self.engine.clear_pending_storage_heal_accounts()?; + self.engine.clear_state_trie_root_checkpoint()?; + self.engine.clear_state_trie_key_checkpoint() } } From 6aec5b4b4d151425fef8aa51bb6147d93b75f9fc Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 12:51:29 -0300 Subject: [PATCH 126/189] Remove noise --- crates/networking/p2p/peer_channels.rs | 1 - crates/networking/p2p/sync.rs | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index ed2dcb3e1b..b6aba744f8 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -315,7 +315,6 @@ impl PeerChannels { }) .await .ok()??; - info!("Peer returned {} storage ranges", slots.len()); // Check we got a reasonable amount of storage ranges if slots.len() > storage_roots.len() || slots.is_empty() { return None; diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 414f96af6e..9e113176e5 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -579,9 +579,8 @@ async fn fetch_storage_batch( .await { info!( - "Received {} storage ranges, last batch incomplete: {}", + "Received {} storage ranges", keys.len(), - incomplete ); // Handle incomplete ranges if incomplete { From abc85d5af36bd920c7bf680c517d2078d1c67e10 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 13:07:45 -0300 Subject: [PATCH 127/189] Fix --- crates/networking/p2p/sync.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 9e113176e5..638d49f8df 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -722,9 +722,9 @@ async fn heal_state_trie( // - Add its children to the queue (if we don't have them already) // - If it is a leaf, request its bytecode & storage // - If it is a leaf, add its path & value to the trie - // We cannot keep the trie state open - let mut trie = store.open_state_trie(current_root); for node in nodes { + // We cannot keep the trie state open + let mut trie = store.open_state_trie(current_root); let path = paths.remove(0); paths.extend(node_missing_children(&node, &path, trie.state())?); if let Node::Leaf(node) = &node { @@ -749,9 +749,9 @@ async fn heal_state_trie( } // Write values to trie trie.insert(account_hash.0.to_vec(), account.encode_to_vec())?; + // Update current root + current_root = trie.hash()?; } - // 
Update current root
-        current_root = trie.hash()?;
     }
     // Send storage & bytecode requests
     if !hahsed_addresses.is_empty() {

From 5f2bdd80a7bdb218326e84cb72ba15d1d80106ab Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 17 Jan 2025 13:13:56 -0300
Subject: [PATCH 128/189] Soft reset

---
 cmd/ethrex/ethrex.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs
index 10dec719b6..a2ff5ba47a 100644
--- a/cmd/ethrex/ethrex.rs
+++ b/cmd/ethrex/ethrex.rs
@@ -145,6 +145,7 @@ async fn main() {
             let store = Store::new(&data_dir, EngineType::InMemory).expect("Failed to create Store");
         }
     }
+    store.clear_snap_state();

     let genesis = read_genesis_file(&network);
     store

From 6cf2dd8018b32ccbf062497a69fa94fe2f29709b Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 17 Jan 2025 13:14:27 -0300
Subject: [PATCH 129/189] Soft reset

---
 cmd/ethrex/ethrex.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs
index a2ff5ba47a..10dec719b6 100644
--- a/cmd/ethrex/ethrex.rs
+++ b/cmd/ethrex/ethrex.rs
@@ -145,7 +145,6 @@ async fn main() {
             let store = Store::new(&data_dir, EngineType::InMemory).expect("Failed to create Store");
         }
     }
-    store.clear_snap_state();

     let genesis = read_genesis_file(&network);
     store

From 0111892db05fb126f1e882f75e01301bd36c8191 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 17 Jan 2025 13:15:58 -0300
Subject: [PATCH 130/189] (test) try increasing response bytes

---
 crates/networking/p2p/peer_channels.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs
index b6aba744f8..104c4b2fc6 100644
--- a/crates/networking/p2p/peer_channels.rs
+++ b/crates/networking/p2p/peer_channels.rs
@@ -30,7 +30,7 @@ use crate::{

 pub const PEER_REPLY_TIMOUT: Duration = Duration::from_secs(45);
 pub const MAX_MESSAGES_IN_PEER_CHANNEL: usize = 25;
-pub const MAX_RESPONSE_BYTES: u64 = 512 * 1024;
+pub const MAX_RESPONSE_BYTES: u64 = 512 * 1024 * 64;
 pub const HASH_MAX: H256 = H256([0xFF; 32]);

 #[derive(Debug, Clone)]

From c3120d3b79a834fb6d56de9f78739a5318e3dc98 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 17 Jan 2025 13:28:25 -0300
Subject: [PATCH 131/189] Unify batch sizes

---
 crates/networking/p2p/sync.rs | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 638d49f8df..e7e42760fd 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -24,6 +24,8 @@ use crate::{kademlia::KademliaTable, peer_channels::BlockRequestOrder};
 const MAX_RETRIES: usize = 5;
 /// The minimum amount of blocks from the head that we want to full sync during a snap sync
 const MIN_FULL_BLOCKS: usize = 64;
+/// Max size of a batch to start a fetch request in queues
+const BATCH_SIZE: usize = 200;

 #[derive(Debug)]
 pub enum SyncMode {
@@ -436,7 +438,6 @@ async fn bytecode_fetcher(
     peers: Arc<Mutex<KademliaTable>>,
     store: Store,
 ) -> Result<(), SyncError> {
-    const BATCH_SIZE: usize = 100;
     let mut pending_bytecodes: Vec<H256> = vec![];
     let mut incoming = true;
     while incoming {
@@ -498,7 +499,6 @@ async fn storage_fetcher(
     store: Store,
     state_root: H256,
 ) -> Result<Vec<H256>, SyncError> {
-    const BATCH_SIZE: usize = 100;
     // Pending list of storages to fetch
     let mut pending_storage: Vec<(H256, H256)> = vec![];
     // The pivot may become stale while the fetcher is active, we will still keep the process
@@ -787,7 +787,6 @@ async fn storage_healer(
     peers: Arc<Mutex<KademliaTable>>,
     store: Store,
 ) -> Result<Vec<H256>, SyncError> {
-
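With this patch the three fetchers share one queue-draining idiom: accumulate work, then drain at most BATCH_SIZE items per fetch request. A minimal, self-contained sketch of that idiom (illustrative function and type names, not the actual fetcher code):

    // Sketch: drain up to BATCH_SIZE queued items per fetch task.
    const BATCH_SIZE: usize = 200;

    fn next_batch<T>(pending: &mut Vec<T>) -> Vec<T> {
        // `min` guards the upper bound when fewer items are queued.
        pending.drain(..BATCH_SIZE.min(pending.len())).collect()
    }

    fn main() {
        let mut queue: Vec<u32> = (0..450).collect();
        assert_eq!(next_batch(&mut queue).len(), 200);
        assert_eq!(next_batch(&mut queue).len(), 200);
        assert_eq!(next_batch(&mut queue).len(), 50); // final partial batch
    }

The `min` bound is what lets the final, partial batch through once the incoming channel closes.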
const BATCH_SIZE: usize = 200; // Pending list of storages to fetch // Each entry is made up of AccountHash -> (CurrentRoot, Paths) let mut pending_storages: BTreeMap)> = BTreeMap::new(); From e7542d28263df7e911f3a06824ba11de9c430d13 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 13:49:21 -0300 Subject: [PATCH 132/189] (test) try increasing batch size --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index e7e42760fd..c2d0720221 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -25,7 +25,7 @@ const MAX_RETRIES: usize = 5; /// The minimum amount of blocks from the head that we want to full sync during a snap sync const MIN_FULL_BLOCKS: usize = 64; /// Max size of a bach to stat a fetch request in queues -const BATCH_SIZE: usize = 200; +const BATCH_SIZE: usize = 300; #[derive(Debug)] pub enum SyncMode { From 69ed32217762d833dd884054ea0d80f1c18fb95e Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 14:06:57 -0300 Subject: [PATCH 133/189] Dont overwrite pending storages --- cmd/ethrex/ethrex.rs | 1 + crates/networking/p2p/sync.rs | 97 ++++++++++++++++------- crates/storage/store/engines/api.rs | 1 - crates/storage/store/engines/in_memory.rs | 7 +- 4 files changed, 74 insertions(+), 32 deletions(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 10dec719b6..a2ff5ba47a 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -145,6 +145,7 @@ async fn main() { let store = Store::new(&data_dir, EngineType::InMemory).expect("Failed to create Store"); } } + store.clear_snap_state(); let genesis = read_genesis_file(&network); store diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index c2d0720221..7c8355025c 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -334,7 +334,9 @@ async fn rebuild_state_trie( )); // Resume download from checkpoint if available or start from an empty trie // We cannot keep an open trie here so we will track the root between lookups - let mut current_state_root = store.get_state_trie_root_checkpoint()?.unwrap_or(*EMPTY_TRIE_HASH); + let mut current_state_root = store + .get_state_trie_root_checkpoint()? + .unwrap_or(*EMPTY_TRIE_HASH); let mut start_account_hash = store.get_state_trie_key_checkpoint()?.unwrap_or_default(); info!("Starting/Resuming state trie download from key {start_account_hash}"); // Fetch Account Ranges @@ -415,16 +417,30 @@ async fn rebuild_state_trie( let pending_storages = pending_storage_accounts.is_empty(); // Next cycle may have different storage roots for these accounts so we will leave them to healing if pending_storages { - // (Assumption) If we are still fetching storages then we should have never started healing and have no pending healing accounts to overwrite - store.set_pending_storage_heal_accounts(pending_storage_accounts)?; + let mut stored_pending_storages = store + .get_pending_storage_heal_accounts()? 
+ .unwrap_or_default(); + stored_pending_storages.extend(pending_storage_accounts); + info!( + "Current pending storage accounts: {}", + pending_storage_accounts.len() + ); + store.set_pending_storage_heal_accounts(stored_pending_storages)?; } if retry_count > MAX_RETRIES || pending_storages { // Skip healing and return stale status - return Ok(false) + return Ok(false); } // Perform state healing to fix inconsistencies with older state info!("Healing"); - let res = heal_state_trie(bytecode_sender.clone(), state_root, current_state_root, store.clone(), peers.clone()).await?; + let res = heal_state_trie( + bytecode_sender.clone(), + state_root, + current_state_root, + store.clone(), + peers.clone(), + ) + .await?; // Send empty batch to signal that no more batches are incoming info!("Account Trie fully rebuilt, signaling bytecode fetcher process"); bytecode_sender.send(vec![]).await?; @@ -531,18 +547,29 @@ async fn storage_fetcher( // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process // If the pivot became stale don't process anything and just save incoming requests - while !stale && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) { + while !stale + && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) + { let now = Instant::now(); let next_batch = pending_storage .drain(..BATCH_SIZE.min(pending_storage.len())) .collect::>(); let batch_size = next_batch.len(); - let remaining = - match fetch_storage_batch(next_batch.clone(), state_root, peers.clone(), store.clone()).await { - Ok(r) => r, - Err(SyncError::StalePivot) => {stale = true; next_batch}, - Err(err) => return Err(err), - }; + let remaining = match fetch_storage_batch( + next_batch.clone(), + state_root, + peers.clone(), + store.clone(), + ) + .await + { + Ok(r) => r, + Err(SyncError::StalePivot) => { + stale = true; + next_batch + } + Err(err) => return Err(err), + }; let remaining_size = remaining.len(); // Add unfeched bytecodes back to the queue pending_storage.extend(remaining); @@ -555,7 +582,10 @@ async fn storage_fetcher( } info!("Finished processing current batches"); } - info!("Concluding storage fetcher, {} storages left in queue to be healed later", pending_storage.len()); + info!( + "Concluding storage fetcher, {} storages left in queue to be healed later", + pending_storage.len() + ); Ok(pending_storage.into_iter().map(|(acc, _)| acc).collect()) } @@ -578,10 +608,7 @@ async fn fetch_storage_batch( .request_storage_ranges(state_root, batch_roots, batch_hahses, H256::zero()) .await { - info!( - "Received {} storage ranges", - keys.len(), - ); + info!("Received {} storage ranges", keys.len(),); // Handle incomplete ranges if incomplete { // An incomplete range cannot be empty @@ -701,7 +728,10 @@ async fn heal_state_trie( )); // Check if we have pending storages to heal from a previous cycle if let Some(pending) = store.get_pending_storage_heal_accounts()? 
{ - info!("Retrieved {} pending storage healing requests", pending.len()); + info!( + "Retrieved {} pending storage healing requests", + pending.len() + ); storage_sender.send(pending).await?; } // Begin by requesting the root node @@ -772,7 +802,10 @@ async fn heal_state_trie( // If a storage trie was left mid-healing we will heal it again let storage_healing_succesful = pending_storage_heal_accounts.is_empty(); if !storage_healing_succesful { - info!("{} storages with pending healing", pending_storage_heal_accounts.len()); + info!( + "{} storages with pending healing", + pending_storage_heal_accounts.len() + ); store.set_pending_storage_heal_accounts(pending_storage_heal_accounts)?; } Ok(retry_count < MAX_RETRIES && storage_healing_succesful) @@ -826,12 +859,21 @@ async fn storage_healer( batch_size += val.1.len(); next_batch.insert(key, val); } - let return_batch = - match heal_storage_batch(state_root, next_batch.clone(), peers.clone(), store.clone()).await { - Ok(b) => b, - Err(SyncError::StalePivot) => {stale = true; next_batch}, - Err(err) => return Err(err), - }; + let return_batch = match heal_storage_batch( + state_root, + next_batch.clone(), + peers.clone(), + store.clone(), + ) + .await + { + Ok(b) => b, + Err(SyncError::StalePivot) => { + stale = true; + next_batch + } + Err(err) => return Err(err), + }; pending_storages.extend(return_batch.into_iter()); } } @@ -849,10 +891,7 @@ async fn heal_storage_batch( for _ in 0..MAX_RETRIES { let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; let req_batch = batch.iter().map(|(k, v)| (*k, v.1.clone())).collect(); - if let Some(mut nodes) = peer - .request_storage_trienodes(state_root, req_batch) - .await - { + if let Some(mut nodes) = peer.request_storage_trienodes(state_root, req_batch).await { info!("Received {} nodes", nodes.len()); // Process the nodes for each account path for (acc_path, (root, paths)) in batch.iter_mut() { diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index 06d0a875cc..c146b2ea16 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -275,5 +275,4 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { fn get_pending_storage_heal_accounts(&self) -> Result>, StoreError>; fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError>; - } diff --git a/crates/storage/store/engines/in_memory.rs b/crates/storage/store/engines/in_memory.rs index 30bd8ec76d..b8eb2140ad 100644 --- a/crates/storage/store/engines/in_memory.rs +++ b/crates/storage/store/engines/in_memory.rs @@ -487,14 +487,17 @@ impl StoreEngine for Store { } fn get_pending_storage_heal_accounts(&self) -> Result>, StoreError> { - Ok(self.inner().snap_state.pending_storage_heal_accounts.clone()) + Ok(self + .inner() + .snap_state + .pending_storage_heal_accounts + .clone()) } fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> { self.inner().snap_state.pending_storage_heal_accounts = None; Ok(()) } - } impl Debug for Store { From 868d35a757461ef722531324cd79bc12429e34a3 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 14:07:50 -0300 Subject: [PATCH 134/189] Dont overwrite pending storages --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 7c8355025c..7f0d716677 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -423,7 +423,7 @@ async fn rebuild_state_trie( 
stored_pending_storages.extend(pending_storage_accounts); info!( "Current pending storage accounts: {}", - pending_storage_accounts.len() + stored_pending_storages.len() ); store.set_pending_storage_heal_accounts(stored_pending_storages)?; } From 93d2f919705c782036a71339add2e2c0a73790c9 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 14:08:34 -0300 Subject: [PATCH 135/189] Remove soft reset --- cmd/ethrex/ethrex.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index a2ff5ba47a..10dec719b6 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -145,7 +145,6 @@ async fn main() { let store = Store::new(&data_dir, EngineType::InMemory).expect("Failed to create Store"); } } - store.clear_snap_state(); let genesis = read_genesis_file(&network); store From 8f90a1f3983d2120d9acc06484e0e3710d033164 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 14:45:45 -0300 Subject: [PATCH 136/189] Dont overwrite pending storages --- cmd/ethrex/ethrex.rs | 1 + crates/networking/p2p/sync.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 10dec719b6..a2ff5ba47a 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -145,6 +145,7 @@ async fn main() { let store = Store::new(&data_dir, EngineType::InMemory).expect("Failed to create Store"); } } + store.clear_snap_state(); let genesis = read_genesis_file(&network); store diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 7f0d716677..f0ee0e73b3 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -414,7 +414,7 @@ async fn rebuild_state_trie( // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; let pending_storage_accounts = storage_fetcher_handle.await??; - let pending_storages = pending_storage_accounts.is_empty(); + let pending_storages = !pending_storage_accounts.is_empty(); // Next cycle may have different storage roots for these accounts so we will leave them to healing if pending_storages { let mut stored_pending_storages = store @@ -427,7 +427,7 @@ async fn rebuild_state_trie( ); store.set_pending_storage_heal_accounts(stored_pending_storages)?; } - if retry_count > MAX_RETRIES || pending_storages { + if retry_count > MAX_RETRIES || !pending_storages { // Skip healing and return stale status return Ok(false); } From bf74c47f93ab35d3728e83bf8d1768d94fd530c9 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 14:46:23 -0300 Subject: [PATCH 137/189] Remove soft reset --- cmd/ethrex/ethrex.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index a2ff5ba47a..10dec719b6 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -145,7 +145,6 @@ async fn main() { let store = Store::new(&data_dir, EngineType::InMemory).expect("Failed to create Store"); } } - store.clear_snap_state(); let genesis = read_genesis_file(&network); store From 3723b8889a6919cdc16a5c273e0bfd9d0e280648 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 15:54:34 -0300 Subject: [PATCH 138/189] Show completion percentage during state sync --- crates/networking/p2p/sync.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index f0ee0e73b3..b2c00b57fe 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -1,7 +1,6 @@ use 
ethrex_blockchain::error::ChainError; use ethrex_core::{ - types::{AccountState, Block, BlockHash, EMPTY_KECCACK_HASH}, - H256, U256, + types::{AccountState, Block, BlockHash, EMPTY_KECCACK_HASH}, BigEndianHash, H256, U256, U512 }; use ethrex_rlp::{decode::RLPDecode, encode::RLPEncode, error::RLPDecodeError}; use ethrex_storage::{error::StoreError, Store}; @@ -342,7 +341,16 @@ async fn rebuild_state_trie( // Fetch Account Ranges // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available let mut retry_count = 0; + let mut timer = Instant::now(); + const PROGRESS_OUTPUT_TIMER: std::time::Duration = std::time::Duration::from_secs(30); while retry_count <= MAX_RETRIES { + // Show current progress percentage + if Instant::now().duration_since(timer) >= PROGRESS_OUTPUT_TIMER { + timer = Instant::now(); + // Add 1 here to avoid dividing by zero, the change should be inperceptible + let completion_rate: U512 = U512::from(start_account_hash.into_uint() + 1) / U512::from(U256::MAX); + info!("Downloading state trie, completion rate: {}%", completion_rate); + } let peer = peers .clone() .lock() From 4bf453ec8cb12d022075c588d1433aaf1daa6ce3 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 16:03:28 -0300 Subject: [PATCH 139/189] Debug --- crates/networking/p2p/sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index b2c00b57fe..152ae47d87 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -348,8 +348,8 @@ async fn rebuild_state_trie( if Instant::now().duration_since(timer) >= PROGRESS_OUTPUT_TIMER { timer = Instant::now(); // Add 1 here to avoid dividing by zero, the change should be inperceptible - let completion_rate: U512 = U512::from(start_account_hash.into_uint() + 1) / U512::from(U256::MAX); - info!("Downloading state trie, completion rate: {}%", completion_rate); + let completion_rate: U512 = dbg!(U512::from(start_account_hash.into_uint() + 1)) / dbg!(U512::from(U256::MAX)); + info!("Downloading state trie, completion rate: {}%", completion_rate) } let peer = peers .clone() From 75f2182a75c7d9e9ef3e2dc94cb1049bbc4b9546 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 16:07:12 -0300 Subject: [PATCH 140/189] Show completion percentage during state sync --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 152ae47d87..2cf646e0ff 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -348,7 +348,7 @@ async fn rebuild_state_trie( if Instant::now().duration_since(timer) >= PROGRESS_OUTPUT_TIMER { timer = Instant::now(); // Add 1 here to avoid dividing by zero, the change should be inperceptible - let completion_rate: U512 = dbg!(U512::from(start_account_hash.into_uint() + 1)) / dbg!(U512::from(U256::MAX)); + let completion_rate: U512 = U512::from(start_account_hash.into_uint() + 1) * 100 / U512::from(U256::MAX); info!("Downloading state trie, completion rate: {}%", completion_rate) } let peer = peers From 1f267e9729cd2eb7d1dc4f67957f535b98315056 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 17:11:10 -0300 Subject: [PATCH 141/189] Show progress: detatch task + show estimated time to finish --- crates/networking/p2p/sync.rs | 48 +++++++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 8 deletions(-) diff --git 
a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 2cf646e0ff..31b24776ab 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -1,6 +1,7 @@
 use ethrex_blockchain::error::ChainError;
 use ethrex_core::{
-    types::{AccountState, Block, BlockHash, EMPTY_KECCACK_HASH}, BigEndianHash, H256, U256, U512
+    types::{AccountState, Block, BlockHash, EMPTY_KECCACK_HASH},
+    BigEndianHash, H256, U256, U512,
 };
 use ethrex_rlp::{decode::RLPDecode, encode::RLPEncode, error::RLPDecodeError};
 use ethrex_storage::{error::StoreError, Store};
@@ -341,15 +342,15 @@ async fn rebuild_state_trie(
     // Fetch Account Ranges
     // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available
     let mut retry_count = 0;
-    let mut timer = Instant::now();
+    let mut progress_timer = Instant::now();
+    let initial_timestamp = Instant::now();
+    let initial_account_hash = start_account_hash.into_uint();
     const PROGRESS_OUTPUT_TIMER: std::time::Duration = std::time::Duration::from_secs(30);
     while retry_count <= MAX_RETRIES {
-        // Show current progress percentage
-        if Instant::now().duration_since(timer) >= PROGRESS_OUTPUT_TIMER {
-            timer = Instant::now();
-            // Add 1 here to avoid dividing by zero, the change should be inperceptible
-            let completion_rate: U512 = U512::from(start_account_hash.into_uint() + 1) * 100 / U512::from(U256::MAX);
-            info!("Downloading state trie, completion rate: {}%", completion_rate)
+        // Show Progress stats (this task is not vital so we can detach it)
+        if Instant::now().duration_since(progress_timer) >= PROGRESS_OUTPUT_TIMER {
+            progress_timer = Instant::now();
+            tokio::spawn(show_progress(start_account_hash, initial_account_hash, initial_timestamp));
         }
         let peer = peers
             .clone()
@@ -960,6 +961,37 @@ fn node_missing_children(
     Ok(paths)
 }

+async fn show_progress(current_account_hash: H256, initial_account_hash: U256, start_time: Instant) {
+    // Calculate current progress percentage
+    // Add 1 here to avoid dividing by zero, the change should be imperceptible
+    let completion_rate: U512 =
+        U512::from(current_account_hash.into_uint() + 1) * 100 / U512::from(U256::MAX);
+    // Make a simple time to finish estimation based on current progress
+    // The estimation relies on account hashes being (close to) evenly distributed
+    let synced_account_hashes = current_account_hash.into_uint() - initial_account_hash;
+    let remaining_account_hashes = U256::MAX - current_account_hash.into_uint();
+    // Time to finish = Time since start / synced_account_hashes * remaining_account_hashes
+    let time_to_finish_secs =
+        U512::from(Instant::now().duration_since(start_time).as_secs())
+            * U512::from(remaining_account_hashes)
+            / U512::from(synced_account_hashes);
+    info!(
+        "Downloading state trie, completion rate: {}%, estimated time to finish: {}",
+        completion_rate,
+        seconds_to_readable(time_to_finish_secs)
+    )
+}
+
+fn seconds_to_readable(seconds: U512) -> String {
+    let (days, rest) = seconds.div_mod(U512::from(60 * 60 * 24));
+    let (hours, rest) = rest.div_mod(U512::from(60 * 60));
+    let (minutes, seconds) = rest.div_mod(U512::from(60));
+    if days > U512::zero() {
+        return format!("Over {days} days");
+    }
+    format!("{hours}h{minutes}m{seconds}s")
+}
+
 #[derive(thiserror::Error, Debug)]
 enum SyncError {
     #[error(transparent)]

From 691d651813cf862bf0f1fb2ffe19b050fd8188d8 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Fri, 17 Jan 2025 17:51:02 -0300
Subject: [PATCH 142/189] Spawn multiple tasks for storage fetcher

---
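The estimate in show_progress is plain proportionality, time_to_finish = elapsed * remaining / synced, which only holds under the stated assumption that account hashes are evenly distributed. A tiny worked check with made-up numbers (u128 standing in for U512):

    // Worked example of the ETA formula above (made-up numbers):
    fn main() {
        let elapsed_secs: u128 = 3_600; // synced for 1 hour so far
        let synced: u128 = 5;           // hash-space units covered
        let remaining: u128 = 95;       // hash-space units left
        let eta = elapsed_secs * remaining / synced;
        assert_eq!(eta, 68_400); // ~19 hours to go at the current rate
    }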
crates/networking/p2p/sync.rs | 76 +++++++++++++++++------------------ 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 31b24776ab..504c950e94 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -350,7 +350,11 @@ async fn rebuild_state_trie( // Show Progress stats (this task is not vital so we can detach it) if Instant::now().duration_since(progress_timer) >= PROGRESS_OUTPUT_TIMER { progress_timer = Instant::now(); - tokio::spawn(show_progress(start_account_hash, initial_account_hash, initial_timestamp)); + tokio::spawn(show_progress( + start_account_hash, + initial_account_hash, + initial_timestamp, + )); } let peer = peers .clone() @@ -559,37 +563,29 @@ async fn storage_fetcher( while !stale && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) { - let now = Instant::now(); - let next_batch = pending_storage - .drain(..BATCH_SIZE.min(pending_storage.len())) - .collect::>(); - let batch_size = next_batch.len(); - let remaining = match fetch_storage_batch( - next_batch.clone(), - state_root, - peers.clone(), - store.clone(), - ) - .await + // We will be spawning multiple tasks and then collecting their results + // This uses a loop inside the main loop as the result from these tasks may lead to more values in queue + let mut storage_tasks = tokio::task::JoinSet::new(); + while !stale + && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) { - Ok(r) => r, - Err(SyncError::StalePivot) => { - stale = true; - next_batch - } - Err(err) => return Err(err), - }; - let remaining_size = remaining.len(); - // Add unfeched bytecodes back to the queue - pending_storage.extend(remaining); - info!( - "Processed Batch of size {} with {} remaing in {} secs", - batch_size, - remaining_size, - now.elapsed().as_secs() - ) + let next_batch = pending_storage + .drain(..BATCH_SIZE.min(pending_storage.len())) + .collect::>(); + storage_tasks.spawn(fetch_storage_batch( + next_batch.clone(), + state_root, + peers.clone(), + store.clone(), + )); + } + // Add unfetched accounts to queue and handle stale signal + for res in storage_tasks.join_all().await { + let (remaining, is_stale) = res?; + pending_storage.extend(remaining); + stale &= is_stale; + } } - info!("Finished processing current batches"); } info!( "Concluding storage fetcher, {} storages left in queue to be healed later", @@ -599,12 +595,13 @@ async fn storage_fetcher( } /// Receives a batch of account hashes with their storage roots, fetches their respective storage ranges via p2p and returns a list of the code hashes that couldn't be fetched in the request (if applicable) +/// Also returns a boolean indicating if the pivot became stale during the request async fn fetch_storage_batch( mut batch: Vec<(H256, H256)>, state_root: H256, peers: Arc>, store: Store, -) -> Result, SyncError> { +) -> Result<(Vec<(H256, H256)>, bool), SyncError> { info!( "Requesting storage ranges for addresses {}..{}", batch.first().unwrap().0, @@ -652,11 +649,11 @@ async fn fetch_storage_batch( } } // Return remaining code hashes in the batch if we couldn't fetch all of them - return Ok(batch); + return Ok((batch, false)); } } // Pivot became stale - Err(SyncError::StalePivot) + Ok((batch, true)) } /// Handles the returned incomplete storage range of a large storage trie and @@ -961,7 +958,11 @@ fn node_missing_children( Ok(paths) } -async fn show_progress(current_account_hash: H256, initial_account_hash: 
U256, start_time: Instant) { +async fn show_progress( + current_account_hash: H256, + initial_account_hash: U256, + start_time: Instant, +) { // Calculate current progress percentage // Add 1 here to avoid dividing by zero, the change should be inperceptible let completion_rate: U512 = @@ -971,10 +972,9 @@ async fn show_progress(current_account_hash: H256, initial_account_hash: U256, s let synced_account_hashes = current_account_hash.into_uint() - initial_account_hash; let remaining_account_hashes = U256::MAX - current_account_hash.into_uint(); // Time to finish = Time since start / synced_account_hashes * remaining_account_hashes - let time_to_finish_secs = - U512::from(Instant::now().duration_since(start_time).as_secs()) - * U512::from(remaining_account_hashes) - / U512::from(synced_account_hashes); + let time_to_finish_secs = U512::from(Instant::now().duration_since(start_time).as_secs()) + * U512::from(remaining_account_hashes) + / U512::from(synced_account_hashes); info!( "Downloading state trie, completion rate: {}%, estimated time to finish: {}", completion_rate, From 59fcde22543796258e4ba0c31db5b113cd6887dd Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 17:53:33 -0300 Subject: [PATCH 143/189] Trace storage tasks --- crates/networking/p2p/sync.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 504c950e94..a58bfbc37e 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -566,24 +566,30 @@ async fn storage_fetcher( // We will be spawning multiple tasks and then collecting their results // This uses a loop inside the main loop as the result from these tasks may lead to more values in queue let mut storage_tasks = tokio::task::JoinSet::new(); + let mut task_num = 0; while !stale && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) { let next_batch = pending_storage .drain(..BATCH_SIZE.min(pending_storage.len())) .collect::>(); + info!("Spawning storage fetcher number {task_num}"); storage_tasks.spawn(fetch_storage_batch( next_batch.clone(), state_root, peers.clone(), store.clone(), )); + task_num +=1; } // Add unfetched accounts to queue and handle stale signal + let mut ret_num = 0; for res in storage_tasks.join_all().await { let (remaining, is_stale) = res?; + info!("Task {}/{} returned {} elements to the queue", ret_num, task_num, remaining.len()); pending_storage.extend(remaining); stale &= is_stale; + ret_num +=1; } } } From 0bd9f537505a8bacbed3403487a1b36cd3cb6072 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 18:38:56 -0300 Subject: [PATCH 144/189] Fix bit logic --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index a58bfbc37e..467f16940d 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -588,7 +588,7 @@ async fn storage_fetcher( let (remaining, is_stale) = res?; info!("Task {}/{} returned {} elements to the queue", ret_num, task_num, remaining.len()); pending_storage.extend(remaining); - stale &= is_stale; + stale |= is_stale; ret_num +=1; } } From 4746e9ed0f807fcf05999300d29360f6ca18289b Mon Sep 17 00:00:00 2001 From: fmoletta Date: Fri, 17 Jan 2025 18:40:39 -0300 Subject: [PATCH 145/189] Fix tracing --- crates/networking/p2p/sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 
467f16940d..bb9dcb1bd3 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -566,7 +566,7 @@ async fn storage_fetcher( // We will be spawning multiple tasks and then collecting their results // This uses a loop inside the main loop as the result from these tasks may lead to more values in queue let mut storage_tasks = tokio::task::JoinSet::new(); - let mut task_num = 0; + let mut task_num = 1; while !stale && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) { @@ -583,7 +583,7 @@ async fn storage_fetcher( task_num +=1; } // Add unfetched accounts to queue and handle stale signal - let mut ret_num = 0; + let mut ret_num = 1; for res in storage_tasks.join_all().await { let (remaining, is_stale) = res?; info!("Task {}/{} returned {} elements to the queue", ret_num, task_num, remaining.len()); From 91162b7dcf872beebf7e7f5cd1c05ff368cb625c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 20 Jan 2025 14:39:14 -0300 Subject: [PATCH 146/189] Update bytes limit for peer requests --- crates/networking/p2p/peer_channels.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 104c4b2fc6..9ca7460d3e 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -30,7 +30,7 @@ use crate::{ pub const PEER_REPLY_TIMOUT: Duration = Duration::from_secs(45); pub const MAX_MESSAGES_IN_PEER_CHANNEL: usize = 25; -pub const MAX_RESPONSE_BYTES: u64 = 512 * 1024 * 64; +pub const MAX_RESPONSE_BYTES: u64 = 2 * 1024 * 1024; pub const HASH_MAX: H256 = H256([0xFF; 32]); #[derive(Debug, Clone)] From 87485db02d8da8c40a87658087b07aef00e9ea9d Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 20 Jan 2025 15:26:24 -0300 Subject: [PATCH 147/189] Remove internal StalePivot error --- crates/networking/p2p/sync.rs | 46 +++++++++++++++-------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index bb9dcb1bd3..a907d4b747 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -630,7 +630,7 @@ async fn fetch_storage_batch( if keys.is_empty() { info!("Large storage trie encountered, handling separately"); let (account_hash, storage_root) = batch.remove(0); - handle_large_storage_range( + if handle_large_storage_range( state_root, account_hash, storage_root, @@ -639,7 +639,13 @@ async fn fetch_storage_batch( peers.clone(), store.clone(), ) - .await?; + .await? 
{
+                // Pivot became stale
+                info!("[DEBUG] Pivot became stale during large trie fetch, scheduling for healing");
+                // Add trie back to the queue and return stale pivot status
+                batch.push((account_hash, storage_root));
+                return Ok((batch, true))
+            }
         }
         // The incomplete range is not the first, we cannot assume it is a large trie, so let's add it back to the queue
     }
@@ -664,6 +670,7 @@

 /// Handles the returned incomplete storage range of a large storage trie and
 /// fetches the rest of the trie using single requests
+/// Returns a boolean indicating if the pivot became stale during fetching
 // TODO: Later on this method can be refactored to use a separate queue process
 // instead of blocking the current thread for the remainder of the retrieval
 async fn handle_large_storage_range(
@@ -674,7 +681,7 @@
     values: Vec,
     peers: Arc<Mutex<KademliaTable>>,
     store: Store,
-) -> Result<(), SyncError> {
+) -> Result<bool, SyncError> {
     // First process the initial range
     // Keep hold of the last key as this will be the first key of the next range
     let mut next_key = *keys.last().unwrap();
@@ -711,14 +718,10 @@
             }
         }
     }
-    if retry_count > MAX_RETRIES {
-        return Err(SyncError::StalePivot);
-    }
-    if current_root != storage_root {
+    if current_root != storage_root && retry_count <= MAX_RETRIES {
         warn!("State sync failed for storage root {storage_root}");
     }
-    info!("Completely fetched large storage trie");
-    Ok(())
+    Ok(retry_count > MAX_RETRIES)
 }

 /// Heals the trie given its state_root by fetching any missing nodes in it via p2p
@@ -871,35 +874,29 @@ async fn storage_healer(
             batch_size += val.1.len();
             next_batch.insert(key, val);
         }
-        let return_batch = match heal_storage_batch(
+        let (return_batch, is_stale) = heal_storage_batch(
             state_root,
             next_batch.clone(),
             peers.clone(),
             store.clone(),
         )
-        .await
-        {
-            Ok(b) => b,
-            Err(SyncError::StalePivot) => {
-                stale = true;
-                next_batch
-            }
-            Err(err) => return Err(err),
-        };
+        .await?;
         pending_storages.extend(return_batch.into_iter());
+        stale |= is_stale;
     }
 }
 Ok(pending_storages.into_iter().map(|(h, _)| h).collect())
 }

 /// Receives a set of storage trie paths (grouped by their corresponding account's state trie path),
-/// fetches their respective nodes, stores them, and returns their children paths and the paths that couldn't be fetched so they can be returned to the queue
+/// fetches their respective nodes, stores their values, and returns their children paths and the paths that couldn't be fetched so they can be returned to the queue
+/// Also returns a boolean indicating if the pivot became stale during the request
 async fn heal_storage_batch(
     state_root: H256,
     mut batch: BTreeMap)>,
     peers: Arc<Mutex<KademliaTable>>,
     store: Store,
-) -> Result)>, SyncError> {
+) -> Result<(BTreeMap)>, bool), SyncError> {
     for _ in 0..MAX_RETRIES {
         let peer = peers.lock().await.get_peer_channels(Capability::Snap).await;
         let req_batch = batch.iter().map(|(k, v)| (*k, v.1.clone())).collect();
@@ -932,11 +929,11 @@
             }
         }
         // Return remaining and added paths to be added to the queue
-        return Ok(batch);
+        return Ok((batch, false));
         }
     }
     // Pivot became stale, let's inform the fetcher
-    Err(SyncError::StalePivot)
+    Ok((batch, true))
 }

 /// Returns the partial paths to the node's children if they are not already part of the trie state
@@ -1018,7 +1015,4 @@ enum SyncError {
     JoinHandle(#[from] tokio::task::JoinError),
     #[error("Missing data from DB")]
     CorruptDB,
-    // This is an internal signal for fetcher processes and should
not be returned by the main sync cycle - #[error("[INTERNAL] Stale Pivot")] - StalePivot, } From ccec4cc7e1b74e3a5c08454fa233a7cd6df3f2bb Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 20 Jan 2025 16:40:46 -0300 Subject: [PATCH 148/189] Handle edge case: sync head not found --- crates/networking/p2p/sync.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index a907d4b747..50d1df2e29 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -79,12 +79,12 @@ impl SyncManager { match self.sync_cycle(current_head, sync_head, store).await { Ok(()) => { info!( - "Sync finished, time elapsed: {} secs", + "Sync cycle finished, time elapsed: {} secs", start_time.elapsed().as_secs() ); } Err(error) => warn!( - "Sync failed due to {error}, time elapsed: {} secs ", + "Sync cycle failed due to {error}, time elapsed: {} secs ", start_time.elapsed().as_secs() ), } @@ -108,7 +108,8 @@ impl SyncManager { current_head = last_header; } } - loop { + let mut retry_count = 0; + while retry_count <= MAX_RETRIES { let peer = self .peers .lock() @@ -121,6 +122,7 @@ impl SyncManager { .request_block_headers(current_head, BlockRequestOrder::OldToNew) .await { + retry_count = 0; info!( "Received {} block headers| Last Number: {}", block_headers.len(), @@ -155,6 +157,12 @@ impl SyncManager { // No more headers to request break; } + } else { + retry_count += 1; + } + if retry_count > MAX_RETRIES { + warn!("Sync failed to find target block header, aborting"); + return Ok(()) } } // We finished fetching all headers, now we can process them From e282079aeb11805a72a9e814eea089fbebc4920f Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 20 Jan 2025 17:32:37 -0300 Subject: [PATCH 149/189] Remove debug code --- crates/common/types/fork_id.rs | 4 ++-- crates/networking/p2p/kademlia.rs | 7 ------- crates/networking/p2p/net.rs | 11 ++--------- crates/networking/p2p/peer_channels.rs | 24 +++--------------------- crates/storage/trie/verify_range.rs | 16 +--------------- 5 files changed, 8 insertions(+), 54 deletions(-) diff --git a/crates/common/types/fork_id.rs b/crates/common/types/fork_id.rs index 55cd3be25d..ec3945612f 100644 --- a/crates/common/types/fork_id.rs +++ b/crates/common/types/fork_id.rs @@ -12,8 +12,8 @@ use super::{BlockHash, BlockNumber, ChainConfig}; #[derive(Debug, PartialEq)] pub struct ForkId { - pub fork_hash: H32, - pub fork_next: BlockNumber, + fork_hash: H32, + fork_next: BlockNumber, } impl ForkId { diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 6c90c2d858..3c24cc3815 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -308,7 +308,6 @@ impl KademliaTable { /// The peer is selected randomly, and doesn't guarantee that the selected peer is not currenlty busy /// If no peer is found, this method will try again after 10 seconds pub async fn get_peer_channels(&self, capability: Capability) -> PeerChannels { - //self.show_peer_stats(); let filter = |peer: &PeerData| -> bool { // Search for peers with an active connection that support the required capabilities peer.channels.is_some() && peer.supported_capabilities.contains(&capability) @@ -337,12 +336,6 @@ impl KademliaTable { let active_peers = self.filter_peers(&active_filter).count(); let snap_active_peers = self.filter_peers(&snap_active_filter).count(); info!("Snap Peers: {snap_active_peers} / Active Peers {active_peers} / Total Peers: {total_peers}"); - 
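PATCH 148 bounds the header download with a retry budget that is reset on every successful reply and aborts the cycle once exhausted. A minimal sketch of that pattern in isolation (hypothetical helper, not the SyncManager code):

    // Sketch of a bounded-retry fetch loop (illustrative only).
    const MAX_RETRIES: usize = 5;

    fn fetch_with_retries(mut try_fetch: impl FnMut() -> Option<Vec<u8>>) -> Option<Vec<u8>> {
        let mut retry_count = 0;
        while retry_count <= MAX_RETRIES {
            if let Some(data) = try_fetch() {
                // A successful reply resets the budget in the real sync loop;
                // here we simply return the data.
                return Some(data);
            }
            retry_count += 1;
        }
        // Budget exhausted: the caller gives up (PATCH 148 warns and returns Ok(())).
        None
    }

    fn main() {
        let mut calls = 0;
        let result = fetch_with_retries(|| {
            calls += 1;
            None // peer never answers
        });
        assert!(result.is_none());
        assert_eq!(calls, MAX_RETRIES + 1);
    }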
let active_peers = self - .filter_peers(&active_filter) - .map(|peer| peer.node.node_id.to_string()) - .collect::>() - .join(", "); - info!("Active Peers ID: {active_peers}"); } } diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 4d796afe1b..18b18dbc43 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -218,7 +218,6 @@ async fn discover_peers_server( continue; } if peer.last_ping_hash.unwrap() == msg.ping_hash { - debug!("Peer {} answered ping with pong", peer.node.node_id); table.lock().await.pong_answered(peer.node.node_id); if peer.channels.is_some() { debug!( @@ -409,16 +408,12 @@ async fn peers_revalidation( // first check that the peers we ping have responded for node_id in previously_pinged_peers { let mut table = table.lock().await; - let Some(peer) = table.get_by_node_id_mut(node_id) else { - continue; - }; + let peer = table.get_by_node_id_mut(node_id).unwrap(); if let Some(has_answered) = peer.revalidation { if has_answered { - debug!("Peer {node_id} answered revalidation ping"); peer.increment_liveness(); } else { - debug!("Peer {node_id} hasn't answered revalidation ping"); peer.decrement_liveness(); } } @@ -426,10 +421,9 @@ async fn peers_revalidation( peer.revalidation = None; if peer.liveness == 0 { - debug!("Replacing Peer {node_id} due to revalidation"); let new_peer = table.replace_peer(node_id); if let Some(new_peer) = new_peer { - let ping_hash: Option = ping( + let ping_hash = ping( &udp_socket, udp_addr, SocketAddr::new(new_peer.node.ip, new_peer.node.udp_port), @@ -782,7 +776,6 @@ async fn serve_requests( let tcp_socket = TcpSocket::new_v4().unwrap(); tcp_socket.bind(tcp_addr).unwrap(); let listener = tcp_socket.listen(50).unwrap(); - table.lock().await.show_peer_stats(); loop { let (stream, _peer_addr) = listener.accept().await.unwrap(); diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 9ca7460d3e..32ed5dbe65 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -9,7 +9,6 @@ use ethrex_rlp::encode::RLPEncode; use ethrex_trie::Nibbles; use ethrex_trie::{verify_range, Node}; use tokio::sync::{mpsc, Mutex}; -use tracing::{info, warn}; use crate::{ rlpx::{ @@ -92,25 +91,12 @@ impl PeerChannels { return Some(block_headers) } // Ignore replies that don't match the expected id (such as late responses) - Some(a) => { - warn!("UNEXPECTED RESPONSE: {a:?}"); - continue; - } - None => { - warn!("NO RESPONSE"); - return None; - } + Some(_) => continue, + None => return None, } } }) - .await; - if block_headers.is_err() { - warn!("PEER TIMEOUT"); - } - let block_headers = block_headers.ok()??; - if block_headers.is_empty() { - warn!("EMPTY BLOCK HEADERS RESPONSE"); - } + .await.ok()??; (!block_headers.is_empty()).then_some(block_headers) } @@ -217,9 +203,6 @@ impl PeerChannels { }) .await .ok()??; - if accounts.is_empty() && proof.is_empty() { - info!("Peer returned empty account range"); - } // Unzip & validate response let proof = encodable_to_proof(&proof); let (account_hashes, accounts): (Vec<_>, Vec<_>) = accounts @@ -333,7 +316,6 @@ impl PeerChannels { .unzip(); // We won't accept empty storage ranges if hahsed_keys.is_empty() { - info!("Empty Slot"); return None; } let encoded_values = values diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index bece7b341a..b96651cb37 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -2,27 +2,13 @@ use 
std::{cmp::Ordering, collections::HashMap};

 use ethereum_types::H256;
 use sha3::{Digest, Keccak256};
-use tracing::{info, warn};

 use crate::{
     nibbles::Nibbles, node::Node, node_hash::NodeHash, state::TrieState, Trie, TrieError, ValueRLP,
 };

-pub fn verify_range(
-    root: H256,
-    first_key: &H256,
-    keys: &[H256],
-    values: &[ValueRLP],
-    proof: &[Vec<u8>],
-) -> Result<bool, TrieError> {
-    let e = verify_range_i(root, first_key, keys, values, proof);
-    if let Err(ref e) = e {
-        warn!("Verify range failure: {e}");
-    }
-    e
-}
 /// Verifies that the key value range belongs to the trie with the given root given the edge proofs for the range
 /// Also returns true if there is more state to be fetched (aka if there are more keys to the right of the given range)
-pub fn verify_range_i(
+pub fn verify_range(
     root: H256,
     first_key: &H256,
     keys: &[H256],

From c92627d0ee1299e6168ca9fd1a046ed1c37cf45e Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Mon, 20 Jan 2025 17:37:56 -0300
Subject: [PATCH 150/189] Debug

---
 crates/networking/p2p/sync.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 50d1df2e29..a0978a0952 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -568,6 +568,9 @@ async fn storage_fetcher(
         // If we have enough pending bytecodes to fill a batch
         // or if we have no more incoming batches, spawn a fetch process
         // If the pivot became stale don't process anything and just save incoming requests
+        if stale {
+            info!("Storage fetcher detected stale pivot");
+        }
         while !stale
             && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty())
         {

From b3531b79ed834f8b1658d6026487b8521e9dfc9f Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Mon, 20 Jan 2025 17:39:11 -0300
Subject: [PATCH 151/189] Remove debug code

---
 crates/networking/p2p/rlpx/eth/backend.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/networking/p2p/rlpx/eth/backend.rs b/crates/networking/p2p/rlpx/eth/backend.rs
index 36f6498942..dd64bcaf88 100644
--- a/crates/networking/p2p/rlpx/eth/backend.rs
+++ b/crates/networking/p2p/rlpx/eth/backend.rs
@@ -69,7 +69,7 @@ pub fn validate_status(msg_data: StatusMessage, storage: &Store) -> Result<(), R
         ));
     }
     // Check ForkID
-    if msg_data.fork_id.fork_hash != fork_id.fork_hash {
+    if msg_data.fork_id != fork_id {
         warn!("Fork Id Hash does not match")
     }

From 181d5fd8065e0d083ff7a736baca418a395814ee Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Mon, 20 Jan 2025 17:47:19 -0300
Subject: [PATCH 152/189] Mute connection errors

---
 crates/networking/p2p/rlpx/connection.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs
index 1d90d0190f..400e1dbe60 100644
--- a/crates/networking/p2p/rlpx/connection.rs
+++ b/crates/networking/p2p/rlpx/connection.rs
@@ -199,11 +199,11 @@ impl RLPxConnection {
                 reason: self.match_disconnect_reason(&error),
             }))
             .await
-            .unwrap_or_else(|e| error!("Could not send Disconnect message: ({e})."));
+            .unwrap_or_else(|e| debug!("Could not send Disconnect message: ({e})."));

         // Discard peer from kademlia table
         let remote_node_id = self.remote_node_id;
-        error!("{error_text}: ({error}), discarding peer {remote_node_id}");
+        debug!("{error_text}: ({error}), discarding peer {remote_node_id}");
         table.lock().await.replace_peer(remote_node_id);
     }

From cffb5820812c517e4a18622cca82a724528e96df Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Mon, 20 Jan 2025 17:51:57
-0300 Subject: [PATCH 153/189] Clean branch --- crates/networking/p2p/kademlia.rs | 2 +- crates/networking/p2p/peer_channels.rs | 2 +- crates/storage/trie/verify_range.rs | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/kademlia.rs b/crates/networking/p2p/kademlia.rs index 3c24cc3815..6a8b4182bd 100644 --- a/crates/networking/p2p/kademlia.rs +++ b/crates/networking/p2p/kademlia.rs @@ -335,7 +335,7 @@ impl KademliaTable { let total_peers = self.iter_peers().count(); let active_peers = self.filter_peers(&active_filter).count(); let snap_active_peers = self.filter_peers(&snap_active_filter).count(); - info!("Snap Peers: {snap_active_peers} / Active Peers {active_peers} / Total Peers: {total_peers}"); + info!("Snap Peers: {snap_active_peers} / Active Peers {active_peers} / Total Peers: {total_peers}") } } diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 32ed5dbe65..8c2ac78ecd 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -330,7 +330,7 @@ impl PeerChannels { verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &proof) .ok()?; } else { - verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &[]).ok(); + verify_range(storage_root, &start, &hahsed_keys, &encoded_values, &[]).ok()?; } storage_keys.push(hahsed_keys); diff --git a/crates/storage/trie/verify_range.rs b/crates/storage/trie/verify_range.rs index b96651cb37..8365354331 100644 --- a/crates/storage/trie/verify_range.rs +++ b/crates/storage/trie/verify_range.rs @@ -6,6 +6,7 @@ use sha3::{Digest, Keccak256}; use crate::{ nibbles::Nibbles, node::Node, node_hash::NodeHash, state::TrieState, Trie, TrieError, ValueRLP, }; + /// Verifies that the key value range belongs to the trie with the given root given the edge proofs for the range /// Also returns true if there is more state to be fetched (aka if there are more keys to the right of the given range) pub fn verify_range( From 2bc5078fbbdb1feea19b7e729d3aafcd753bfb3c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 20 Jan 2025 18:23:25 -0300 Subject: [PATCH 154/189] Tone down sync tracing --- crates/networking/p2p/sync.rs | 100 ++++++++++------------------------ 1 file changed, 30 insertions(+), 70 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index a0978a0952..58b9c18b8f 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -14,7 +14,7 @@ use tokio::{ }, time::Instant, }; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use crate::rlpx::p2p::Capability; use crate::{kademlia::KademliaTable, peer_channels::BlockRequestOrder}; @@ -116,14 +116,14 @@ impl SyncManager { .await .get_peer_channels(Capability::Eth) .await; - info!("Requesting Block Headers from {current_head}"); + debug!("Requesting Block Headers from {current_head}"); // Request Block Headers from Peer if let Some(mut block_headers) = peer .request_block_headers(current_head, BlockRequestOrder::OldToNew) .await { retry_count = 0; - info!( + debug!( "Received {} block headers| Last Number: {}", block_headers.len(), block_headers.last().as_ref().unwrap().number @@ -148,12 +148,6 @@ impl SyncManager { store.add_block_headers(block_hashes, block_headers.clone())?; if sync_head_found { - let sync_header = block_headers - .iter() - .find(|h| h.compute_block_hash() == sync_head) - .unwrap(); - info!("Found sync head at block: {}", sync_header.number); - // No more headers to request break; 
} @@ -180,7 +174,7 @@ impl SyncManager { let pivot_header = store .get_block_header_by_hash(all_block_hashes[pivot_idx])? .ok_or(SyncError::CorruptDB)?; - info!( + debug!( "Selected block {} as pivot for snap sync", pivot_header.number ); @@ -198,7 +192,7 @@ impl SyncManager { } // Wait for all bodies to be downloaded store_bodies_handle.await??; - // For all blocks before the pivot: Store the bodies and fetch the receipts + // For all blocks before the pivot: Store the bodies and fetch the receipts (TODO) // For all blocks after the pivot: Process them fully for hash in &all_block_hashes[pivot_idx..] { let block = store @@ -232,10 +226,10 @@ async fn download_and_run_blocks( ) -> Result<(), SyncError> { loop { let peer = peers.lock().await.get_peer_channels(Capability::Eth).await; - info!("Requesting Block Bodies "); + debug!("Requesting Block Bodies"); if let Some(block_bodies) = peer.request_block_bodies(block_hashes.clone()).await { let block_bodies_len = block_bodies.len(); - info!("Received {} Block Bodies", block_bodies_len); + debug!("Received {} Block Bodies", block_bodies_len); // Execute and store blocks for (hash, body) in block_hashes .drain(..block_bodies_len) @@ -253,7 +247,7 @@ async fn download_and_run_blocks( store.set_canonical_block(number, hash)?; store.update_latest_block_number(number)?; } - info!("Executed & stored {} blocks", block_bodies_len); + debug!("Executed & stored {} blocks", block_bodies_len); // Check if we need to ask for another batch if block_hashes.is_empty() { break; @@ -271,9 +265,9 @@ async fn store_block_bodies( ) -> Result<(), SyncError> { loop { let peer = peers.lock().await.get_peer_channels(Capability::Eth).await; - info!("Requesting Block Bodies "); + debug!("Requesting Block Bodies"); if let Some(block_bodies) = peer.request_block_bodies(block_hashes.clone()).await { - info!(" Received {} Block Bodies", block_bodies.len()); + debug!(" Received {} Block Bodies", block_bodies.len()); // Track which bodies we have already fetched let current_block_hashes = block_hashes.drain(..block_bodies.len()); // Add bodies to storage @@ -300,9 +294,9 @@ async fn store_receipts( ) -> Result<(), SyncError> { loop { let peer = peers.lock().await.get_peer_channels(Capability::Eth).await; - info!("Requesting Receipts"); + debug!("Requesting Receipts"); if let Some(receipts) = peer.request_receipts(block_hashes.clone()).await { - info!(" Received {} Receipts", receipts.len()); + debug!(" Received {} Receipts", receipts.len()); // Track which blocks we have already fetched receipts for for (block_hash, receipts) in block_hashes.drain(0..receipts.len()).zip(receipts) { store.add_receipts(block_hash, receipts)?; @@ -325,7 +319,7 @@ async fn rebuild_state_trie( peers: Arc>, store: Store, ) -> Result { - info!("Rebuilding State Trie"); + debug!("Rebuilding State Trie"); // Spawn storage & bytecode fetchers let (bytecode_sender, bytecode_receiver) = mpsc::channel::>(500); let (storage_sender, storage_receiver) = mpsc::channel::>(500); @@ -346,7 +340,7 @@ async fn rebuild_state_trie( .get_state_trie_root_checkpoint()? 
.unwrap_or(*EMPTY_TRIE_HASH); let mut start_account_hash = store.get_state_trie_key_checkpoint()?.unwrap_or_default(); - info!("Starting/Resuming state trie download from key {start_account_hash}"); + debug!("Starting/Resuming state trie download from key {start_account_hash}"); // Fetch Account Ranges // If we reached the maximum amount of retries then it means the state we are requesting is probably old and no longer available let mut retry_count = 0; @@ -370,12 +364,12 @@ async fn rebuild_state_trie( .await .get_peer_channels(Capability::Snap) .await; - info!("Requesting Account Range for state root {state_root}, starting hash: {start_account_hash}"); + debug!("Requesting Account Range for state root {state_root}, starting hash: {start_account_hash}"); if let Some((account_hashes, accounts, should_continue)) = peer .request_account_range(state_root, start_account_hash) .await { - info!("Received {} account ranges", accounts.len()); + debug!("Received {} account ranges", accounts.len()); // Reset retry counter retry_count = 0; // Update starting hash for next batch @@ -431,7 +425,7 @@ async fn rebuild_state_trie( store.set_state_trie_root_checkpoint(current_state_root)?; store.set_state_trie_key_checkpoint(start_account_hash)?; } - info!("Account Trie Fetching ended, signaling storage fetcher process"); + debug!("Account Trie Fetching ended, signaling storage fetcher process"); // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; let pending_storage_accounts = storage_fetcher_handle.await??; @@ -442,7 +436,7 @@ async fn rebuild_state_trie( .get_pending_storage_heal_accounts()? .unwrap_or_default(); stored_pending_storages.extend(pending_storage_accounts); - info!( + debug!( "Current pending storage accounts: {}", stored_pending_storages.len() ); @@ -453,7 +447,7 @@ async fn rebuild_state_trie( return Ok(false); } // Perform state healing to fix inconsistencies with older state - info!("Healing"); + info!("Starting state healing"); let res = heal_state_trie( bytecode_sender.clone(), state_root, @@ -463,7 +457,7 @@ async fn rebuild_state_trie( ) .await?; // Send empty batch to signal that no more batches are incoming - info!("Account Trie fully rebuilt, signaling bytecode fetcher process"); + debug!("Account Trie fully rebuilt, signaling bytecode fetcher process"); bytecode_sender.send(vec![]).await?; bytecode_fetcher_handle.await??; Ok(res) @@ -482,14 +476,9 @@ async fn bytecode_fetcher( match receiver.recv().await { Some(code_hashes) if !code_hashes.is_empty() => { pending_bytecodes.extend(code_hashes); - info!( - "Received incoming bytecode request, current batch: {}/{BATCH_SIZE}", - pending_bytecodes.len() - ) } // Disconnect / Empty message signaling no more bytecodes to sync _ => { - info!("Final bytecode batch"); incoming = false } } @@ -516,7 +505,7 @@ async fn fetch_bytecode_batch( loop { let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; if let Some(bytecodes) = peer.request_bytecodes(batch.clone()).await { - info!("Received {} bytecodes", bytecodes.len()); + debug!("Received {} bytecodes", bytecodes.len()); // Store the bytecodes for code in bytecodes.into_iter() { store.add_account_code(batch.remove(0), code)?; @@ -544,67 +533,46 @@ async fn storage_fetcher( let mut incoming = true; while incoming { // Fetch incoming requests - let awaiting_batch = Instant::now(); match receiver.recv().await { Some(account_hashes_and_roots) if !account_hashes_and_roots.is_empty() => { - info!( - "Spent {} secs waiting for 
incoming batch", - awaiting_batch.elapsed().as_secs() - ); pending_storage.extend(account_hashes_and_roots); - info!( - "Received incoming storage range request, current batch: {}/{BATCH_SIZE}", - pending_storage.len() - ); - info!("Number of messages in receiver: {}", receiver.len()); } // Disconnect / Empty message signaling no more bytecodes to sync _ => { - info!("Final storage batch"); incoming = false } } - info!("Processing current batches"); // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process // If the pivot became stale don't process anything and just save incoming requests - if stale { - info!("Storage fetcher detected stale pivot"); - } while !stale && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) { // We will be spawning multiple tasks and then collecting their results // This uses a loop inside the main loop as the result from these tasks may lead to more values in queue let mut storage_tasks = tokio::task::JoinSet::new(); - let mut task_num = 1; while !stale && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) { let next_batch = pending_storage .drain(..BATCH_SIZE.min(pending_storage.len())) .collect::>(); - info!("Spawning storage fetcher number {task_num}"); storage_tasks.spawn(fetch_storage_batch( next_batch.clone(), state_root, peers.clone(), store.clone(), )); - task_num +=1; } // Add unfetched accounts to queue and handle stale signal - let mut ret_num = 1; for res in storage_tasks.join_all().await { let (remaining, is_stale) = res?; - info!("Task {}/{} returned {} elements to the queue", ret_num, task_num, remaining.len()); pending_storage.extend(remaining); stale |= is_stale; - ret_num +=1; } } } - info!( + debug!( "Concluding storage fetcher, {} storages left in queue to be healed later", pending_storage.len() ); @@ -631,7 +599,7 @@ async fn fetch_storage_batch( .request_storage_ranges(state_root, batch_roots, batch_hahses, H256::zero()) .await { - info!("Received {} storage ranges", keys.len(),); + debug!("Received {} storage ranges", keys.len(),); // Handle incomplete ranges if incomplete { // An incomplete range cannot be empty @@ -639,7 +607,7 @@ async fn fetch_storage_batch( // If only one incomplete range is returned then it must belong to a trie that is too big to fit into one request // We will handle this large trie separately if keys.is_empty() { - info!("Large storage trie encountered, handling separately"); + debug!("Large storage trie encountered, handling separately"); let (account_hash, storage_root) = batch.remove(0); if handle_large_storage_range( state_root, @@ -652,7 +620,6 @@ async fn fetch_storage_batch( ) .await? 
{ // Pivot became stale - info!("[DEBUG] Pivot became stale during large trie fetch, scheduling for healing"); // Add trie back to the queue and return stale pivot status batch.push((account_hash, storage_root)); return Ok((batch, true)) } @@ -709,7 +676,7 @@ async fn handle_large_storage_range( let mut retry_count = 0; while should_continue { while retry_count <= MAX_RETRIES { - info!("Fetching large storage trie, current key: {}", next_key); + debug!("Fetching large storage trie, current key: {}", next_key); let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; if let Some((keys, values, incomplete)) = peer .request_storage_range(state_root, storage_root, account_hash, next_key) @@ -754,7 +721,7 @@ async fn heal_state_trie( )); // Check if we have pending storages to heal from a previous cycle if let Some(pending) = store.get_pending_storage_heal_accounts()? { - info!( + debug!( "Retrieved {} pending storage healing requests", pending.len() ); @@ -820,7 +787,7 @@ retry_count += 1; } } - info!("State Healing stopped, signaling storage healer"); + debug!("State Healing stopped, signaling storage healer"); // Send empty batch to signal that no more batches are incoming storage_sender.send(vec![]).await?; let pending_storage_heal_accounts = storage_healer_handler.await??; @@ -828,10 +795,6 @@ // If a storage trie was left mid-healing we will heal it again let storage_healing_succesful = pending_storage_heal_accounts.is_empty(); if !storage_healing_succesful { - info!( - "{} storages with pending healing", - pending_storage_heal_accounts.len() - ); store.set_pending_storage_heal_accounts(pending_storage_heal_accounts)?; } Ok(retry_count < MAX_RETRIES && storage_healing_succesful) @@ -864,11 +827,6 @@ async fn storage_healer( .into_iter() .map(|acc_path| (acc_path, (*EMPTY_TRIE_HASH, vec![Nibbles::default()]))), ); - info!( - "Received incoming storage heal request, current batch: {}/{BATCH_SIZE}", - pending_storages.len() - ); - info!("Number of messages in receiver: {}", receiver.len()); } // Disconnect / Empty message signaling no more bytecodes to sync _ => incoming = false, @@ -912,7 +870,7 @@ async fn heal_storage_batch( let peer = peers.lock().await.get_peer_channels(Capability::Snap).await; let req_batch = batch.iter().map(|(k, v)| (*k, v.1.clone())).collect(); if let Some(mut nodes) = peer.request_storage_trienodes(state_root, req_batch).await { - info!("Received {} nodes", nodes.len()); + debug!("Received {} nodes", nodes.len()); // Process the nodes for each account path for (acc_path, (root, paths)) in batch.iter_mut() { let mut trie = store.open_storage_trie(*acc_path, *root); @@ -972,6 +930,8 @@ fn node_missing_children( Ok(paths) } +/// Shows the completion rate & estimated remaining time of the state sync phase of snap sync +/// Does not take into account healing async fn show_progress( current_account_hash: H256, initial_account_hash: U256, From f53634d39bd26c51b900c569087814fa96be3dd4 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 10:36:06 -0300 Subject: [PATCH 155/189] Tone down sync tracing --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 58b9c18b8f..6a886184e4 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -587,7 +587,7 @@ async fn fetch_storage_batch( peers: Arc<Mutex<KademliaTable>>, store: Store, ) -> Result<(Vec<(H256, H256)>, bool), SyncError> { - info!(
+ debug!( "Requesting storage ranges for addresses {}..{}", batch.first().unwrap().0, batch.last().unwrap().0 From a1510330879ea5516b0b86e966c2d7ec690ea2cf Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 10:39:48 -0300 Subject: [PATCH 156/189] fmt + replace debug with error return --- crates/networking/p2p/peer_channels.rs | 3 ++- crates/networking/p2p/rlpx/eth/backend.rs | 2 +- crates/networking/p2p/rlpx/handshake.rs | 2 +- crates/networking/p2p/sync.rs | 25 ++++++++--------------- 4 files changed, 13 insertions(+), 19 deletions(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index 8c2ac78ecd..cbf5788cf6 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -96,7 +96,8 @@ impl PeerChannels { } } }) - .await.ok()??; + .await + .ok()??; (!block_headers.is_empty()).then_some(block_headers) } diff --git a/crates/networking/p2p/rlpx/eth/backend.rs b/crates/networking/p2p/rlpx/eth/backend.rs index dd64bcaf88..19078b75f3 100644 --- a/crates/networking/p2p/rlpx/eth/backend.rs +++ b/crates/networking/p2p/rlpx/eth/backend.rs @@ -69,7 +69,7 @@ pub fn validate_status(msg_data: StatusMessage, storage: &Store) -> Result<(), R )); } // Check ForkID - if msg_data.fork_id!= fork_id { + if msg_data.fork_id != fork_id { warn!("Fork Id Hash does not match") } diff --git a/crates/networking/p2p/rlpx/handshake.rs b/crates/networking/p2p/rlpx/handshake.rs index cd0dc9240c..0d5b0685aa 100644 --- a/crates/networking/p2p/rlpx/handshake.rs +++ b/crates/networking/p2p/rlpx/handshake.rs @@ -118,7 +118,7 @@ fn decrypt_message( // Verify the MAC. let expected_d = sha256_hmac(&mac_key, &[iv, c], size_data); if d != expected_d { - debug!("Mismatched MAC") + return Err(RLPxError::HandshakeError(String::from("Invalid MAC"))); } // Decrypt the message with the AES key. diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 6a886184e4..adbc7a79b5 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -156,7 +156,7 @@ impl SyncManager { } if retry_count > MAX_RETRIES { warn!("Sync failed to find target block header, aborting"); - return Ok(()) + return Ok(()); } } // We finished fetching all headers, now we can process them @@ -478,9 +478,7 @@ async fn bytecode_fetcher( pending_bytecodes.extend(code_hashes); } // Disconnect / Empty message signaling no more bytecodes to sync - _ => { - incoming = false - } + _ => incoming = false, } // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process @@ -538,9 +536,7 @@ async fn storage_fetcher( pending_storage.extend(account_hashes_and_roots); } // Disconnect / Empty message signaling no more bytecodes to sync - _ => { - incoming = false - } + _ => incoming = false, } // If we have enough pending bytecodes to fill a batch // or if we have no more incoming batches, spawn a fetch process @@ -618,11 +614,12 @@ async fn fetch_storage_batch( peers.clone(), store.clone(), ) - .await? { + .await? 
+ { // Pivot became stale // Add trie back to the queue and return stale pivot status batch.push((account_hash, storage_root)); - return Ok((batch, true)) + return Ok((batch, true)); } } // The incomplete range is not the first, we cannot asume it is a large trie, so lets add it back to the queue @@ -843,13 +840,9 @@ async fn storage_healer( batch_size += val.1.len(); next_batch.insert(key, val); } - let (return_batch, is_stale) = heal_storage_batch( - state_root, - next_batch.clone(), - peers.clone(), - store.clone(), - ) - .await?; + let (return_batch, is_stale) = + heal_storage_batch(state_root, next_batch.clone(), peers.clone(), store.clone()) + .await?; pending_storages.extend(return_batch.into_iter()); stale |= is_stale; } From d84c034447fc3cdc96f94abe3a7d51c37afb0949 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 10:42:06 -0300 Subject: [PATCH 157/189] Add debug message on removed unwrap --- crates/networking/p2p/net.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 18b18dbc43..f26deef821 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -814,6 +814,7 @@ async fn handle_peer_as_initiator( .connect(SocketAddr::new(node.ip, node.tcp_port)) .await else { + debug!("Failed to connect to peer via tcp"); return; }; match RLPxConnection::initiator(signer, msg, stream, storage, connection_broadcast) { From 199f25a6074916c5f26b34ab7498ec5224e31106 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 10:46:09 -0300 Subject: [PATCH 158/189] Clearly point out hacky fixes --- crates/networking/p2p/net.rs | 1 + crates/networking/p2p/rlpx/connection.rs | 8 ++++++-- crates/networking/p2p/rlpx/eth/backend.rs | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index f26deef821..726d146c6f 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -219,6 +219,7 @@ async fn discover_peers_server( } if peer.last_ping_hash.unwrap() == msg.ping_hash { table.lock().await.pong_answered(peer.node.node_id); + // TODO: This is a hacky fix for the problem reported in https://github.com/lambdaclass/ethrex/issues/1684 if peer.channels.is_some() { debug!( "Skip trying to connect to already connected peer {}", diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 400e1dbe60..cdcb9e9e24 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -51,7 +51,7 @@ use tokio::{ }; use tokio_stream::StreamExt; use tokio_util::codec::Framed; -use tracing::{debug, error}; +use tracing::{debug, error, warn}; const CAP_P2P: (Capability, u8) = (Capability::P2p, 5); const CAP_ETH: (Capability, u8) = (Capability::Eth, 68); const CAP_SNAP: (Capability, u8) = (Capability::Snap, 1); @@ -304,7 +304,11 @@ impl RLPxConnection { tokio::select! 
{ // Expect a message from the remote peer message = self.receive() => { - let _ = self.handle_message(message?, sender.clone()).await; + // TODO: This is a hacky fix for the problem reported in https://github.com/lambdaclass/ethrex/issues/1685 + let res = self.handle_message(message?, sender.clone()).await; + if let Err(err) = res { + warn!("Handle message failed with {err:?}"); + } } // Expect a message from the backend Some(message) = receiver.recv() => { diff --git a/crates/networking/p2p/rlpx/eth/backend.rs b/crates/networking/p2p/rlpx/eth/backend.rs index 19078b75f3..3b8527f9b3 100644 --- a/crates/networking/p2p/rlpx/eth/backend.rs +++ b/crates/networking/p2p/rlpx/eth/backend.rs @@ -69,6 +69,7 @@ pub fn validate_status(msg_data: StatusMessage, storage: &Store) -> Result<(), R )); } // Check ForkID + // TODO: This is a hacky fix for the problem reported in https://github.com/lambdaclass/ethrex/issues/1685 if msg_data.fork_id != fork_id { warn!("Fork Id Hash does not match") } From 55ad8349688617e26aa62ed2278db278a50a4d62 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 12:12:15 -0300 Subject: [PATCH 159/189] Update redb impl --- cmd/ethrex/Cargo.toml | 2 +- crates/storage/store/engines/redb.rs | 121 +++++++++++++++++++++++++++ crates/storage/trie/db/utils.rs | 2 +- 3 files changed, 123 insertions(+), 2 deletions(-) diff --git a/cmd/ethrex/Cargo.toml b/cmd/ethrex/Cargo.toml index 05f707e54a..ccdf642a5e 100644 --- a/cmd/ethrex/Cargo.toml +++ b/cmd/ethrex/Cargo.toml @@ -42,7 +42,7 @@ name = "ethrex" path = "./ethrex.rs" [features] -default = ["dep:ethrex-storage", "libmdbx"] +default = ["dep:ethrex-storage", "redb"] dev = ["dep:ethrex-dev"] metrics = ["ethrex-blockchain/metrics", "ethrex-l2/metrics"] libmdbx = ["dep:libmdbx", "ethrex-storage/libmdbx"] diff --git a/crates/storage/store/engines/redb.rs b/crates/storage/store/engines/redb.rs index e1d27bcd72..3c57a00b20 100644 --- a/crates/storage/store/engines/redb.rs +++ b/crates/storage/store/engines/redb.rs @@ -22,6 +22,7 @@ use crate::{ }, }; +use super::utils::SnapStateIndex; use super::{api::StoreEngine, utils::ChainDataIndex}; const STATE_TRIE_NODES_TABLE: TableDefinition<&[u8], &[u8]> = @@ -52,6 +53,8 @@ const TRANSACTION_LOCATIONS_TABLE: MultimapTableDefinition< TransactionHashRLP, Rlp<(BlockNumber, BlockHash, Index)>, > = MultimapTableDefinition::new("TransactionLocations"); +const SNAP_STATE_TABLE: TableDefinition<SnapStateIndex, Vec<u8>> = + TableDefinition::new("SnapState"); #[derive(Debug)] pub struct RedBStore { @@ -697,6 +700,82 @@ impl StoreEngine for RedBStore { .map(|receipt| receipt.to()) .collect()) } + + fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError> { + self.write( + SNAP_STATE_TABLE, + SnapStateIndex::HeaderDownloadCheckpoint, + block_hash.encode_to_vec(), + ) + } + + fn get_header_download_checkpoint(&self) -> Result<Option<BlockHash>, StoreError> { + self.read(SNAP_STATE_TABLE, SnapStateIndex::HeaderDownloadCheckpoint)?
+ .map(|rlp| RLPDecode::decode(&rlp.value())) + .transpose() + .map_err(StoreError::RLPDecode) + } + + fn clear_header_download_checkpoint(&self) -> Result<(), StoreError> { + self.delete(SNAP_STATE_TABLE, SnapStateIndex::HeaderDownloadCheckpoint) + } + + fn set_state_trie_root_checkpoint(&self, current_root: H256) -> Result<(), StoreError> { + self.write( + SNAP_STATE_TABLE, + SnapStateIndex::StateTrieRootCheckpoint, + current_root.encode_to_vec(), + ) + } + + fn get_state_trie_root_checkpoint(&self) -> Result<Option<H256>, StoreError> { + self.read(SNAP_STATE_TABLE, SnapStateIndex::StateTrieRootCheckpoint)? + .map(|rlp| RLPDecode::decode(&rlp.value())) + .transpose() + .map_err(StoreError::RLPDecode) + } + + fn clear_state_trie_root_checkpoint(&self) -> Result<(), StoreError> { + self.delete(SNAP_STATE_TABLE, SnapStateIndex::StateTrieRootCheckpoint) + } + + fn set_state_trie_key_checkpoint(&self, last_key: H256) -> Result<(), StoreError> { + self.write( + SNAP_STATE_TABLE, + SnapStateIndex::StateTrieKeyCheckpoint, + last_key.encode_to_vec(), + ) + } + + fn get_state_trie_key_checkpoint(&self) -> Result<Option<H256>, StoreError> { + self.read(SNAP_STATE_TABLE, SnapStateIndex::StateTrieKeyCheckpoint)? + .map(|rlp| RLPDecode::decode(&rlp.value())) + .transpose() + .map_err(StoreError::RLPDecode) + } + + fn clear_state_trie_key_checkpoint(&self) -> Result<(), StoreError> { + self.delete(SNAP_STATE_TABLE, SnapStateIndex::StateTrieKeyCheckpoint) + } + + fn set_pending_storage_heal_accounts(&self, accounts: Vec<H256>) -> Result<(), StoreError> { + self.write( + SNAP_STATE_TABLE, + SnapStateIndex::PendingStorageHealAccounts, + accounts.encode_to_vec(), + ) + } + + fn get_pending_storage_heal_accounts(&self) -> Result<Option<Vec<H256>>, StoreError> { + self.read(SNAP_STATE_TABLE, SnapStateIndex::PendingStorageHealAccounts)?
+ .map(|rlp| RLPDecode::decode(&rlp.value())) + .transpose() + .map_err(StoreError::RLPDecode) + } + + fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> { + self.delete(SNAP_STATE_TABLE, SnapStateIndex::PendingStorageHealAccounts) + } } impl redb::Value for ChainDataIndex { @@ -740,6 +819,47 @@ impl redb::Key for ChainDataIndex { } } +impl redb::Value for SnapStateIndex { + type SelfType<'a> + = SnapStateIndex + where + Self: 'a; + + type AsBytes<'a> + = [u8; 1] + where + Self: 'a; + + fn fixed_width() -> Option<usize> { + None + } + + fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> + where + Self: 'a, + { + data[0].into() + } + + fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> + where + Self: 'a, + Self: 'b, + { + [*value as u8] + } + + fn type_name() -> redb::TypeName { + TypeName::new("SnapStateIndex") + } +} + +impl redb::Key for SnapStateIndex { + fn compare(data1: &[u8], data2: &[u8]) -> std::cmp::Ordering { + data1.cmp(data2) + } +} + pub fn init_db() -> Result<Database, StoreError> { let db = Database::create("ethrex.redb")?; @@ -755,6 +875,7 @@ pub fn init_db() -> Result<Database, StoreError> { table_creation_txn.open_table(PAYLOADS_TABLE)?; table_creation_txn.open_table(PENDING_BLOCKS_TABLE)?; table_creation_txn.open_multimap_table(TRANSACTION_LOCATIONS_TABLE)?; + table_creation_txn.open_table(SNAP_STATE_TABLE)?; table_creation_txn.commit()?; Ok(db) diff --git a/crates/storage/trie/db/utils.rs b/crates/storage/trie/db/utils.rs index c1ab27e82a..10141a2e5a 100644 --- a/crates/storage/trie/db/utils.rs +++ b/crates/storage/trie/db/utils.rs @@ -1,4 +1,4 @@ -#[cfg(feature = "libmdbx")] +#[cfg(any(feature = "libmdbx", feature = "redb"))] // In order to use NodeHash as key in a dupsort table we must encode it into a fixed size type pub fn node_hash_to_fixed_size(node_hash: Vec<u8>) -> [u8; 33] { // keep original len so we can re-construct it later From 9fa43286c5e96880d7d5997a98ec964cd6cde542 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 12:14:50 -0300 Subject: [PATCH 160/189] Fix --- cmd/ethrex/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/ethrex/Cargo.toml b/cmd/ethrex/Cargo.toml index ccdf642a5e..05f707e54a 100644 --- a/cmd/ethrex/Cargo.toml +++ b/cmd/ethrex/Cargo.toml @@ -42,7 +42,7 @@ name = "ethrex" path = "./ethrex.rs" [features] -default = ["dep:ethrex-storage", "redb"] +default = ["dep:ethrex-storage", "libmdbx"] dev = ["dep:ethrex-dev"] metrics = ["ethrex-blockchain/metrics", "ethrex-l2/metrics"] libmdbx = ["dep:libmdbx", "ethrex-storage/libmdbx"] From f7b0ee9eec49235813153b5b207702085bae24d2 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 12:17:37 -0300 Subject: [PATCH 161/189] Fix --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index adbc7a79b5..a62e20ac6b 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -310,7 +310,7 @@ async fn store_receipts( Ok(()) } -/// Rebuilds a Block's state trie by requesting snap state from peers, also performs state healing (TODO) +/// Rebuilds a Block's state trie by requesting snap state from peers, also performs state healing /// Receives an optional checkpoint in case there was a previous snap sync process that became stale, in which /// case it will continue from the checkpoint and then apply healing to fix inconsistencies with the older state /// Returns true if all state was fetched or false if the block is too old and the
state is no longer available From ae0f9e23fa3a35323dbf64473c42246bf7de29fd Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 12:29:23 -0300 Subject: [PATCH 162/189] restore tracing --- crates/networking/p2p/net.rs | 6 +++--- crates/networking/rpc/engine/payload.rs | 10 +++++----- crates/networking/rpc/eth/block.rs | 2 +- crates/networking/rpc/eth/client.rs | 2 +- crates/networking/rpc/rpc.rs | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 726d146c6f..cdbdf24fc0 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -25,7 +25,7 @@ use tokio::{ sync::{broadcast, Mutex}, try_join, }; -use tracing::{debug, info}; +use tracing::{debug, error, info}; use types::{Endpoint, Node}; pub mod bootnode; @@ -214,14 +214,14 @@ async fn discover_peers_server( }; if let Some(peer) = peer { if peer.last_ping_hash.is_none() { - debug!("Discarding pong as the node did not send a previous ping"); + error!("Discarding pong as the node did not send a previous ping"); continue; } if peer.last_ping_hash.unwrap() == msg.ping_hash { table.lock().await.pong_answered(peer.node.node_id); // TODO: This is a hacky fix for the problem reported in https://github.com/lambdaclass/ethrex/issues/1684 if peer.channels.is_some() { - debug!( + error!( "Skip trying to connect to already connected peer {}", peer.node.node_id ); diff --git a/crates/networking/rpc/engine/payload.rs b/crates/networking/rpc/engine/payload.rs index 2db79846d6..e6b382a118 100644 --- a/crates/networking/rpc/engine/payload.rs +++ b/crates/networking/rpc/engine/payload.rs @@ -4,7 +4,7 @@ use ethrex_blockchain::payload::build_payload; use ethrex_core::types::{BlobsBundle, Block, BlockBody, BlockHash, BlockNumber, Fork}; use ethrex_core::{H256, U256}; use serde_json::Value; -use tracing::{debug, error, warn}; +use tracing::{debug, error, info, warn}; use crate::types::payload::{ ExecutionPayload, ExecutionPayloadBody, ExecutionPayloadResponse, PayloadStatus, @@ -369,7 +369,7 @@ fn get_block_from_payload( parent_beacon_block_root: Option<H256>, ) -> Result<Block, RpcErr> { let block_hash = payload.block_hash; - debug!("Received new payload with block hash: {block_hash:#x}"); + info!("Received new payload with block hash: {block_hash:#x}"); payload .clone() @@ -398,7 +398,7 @@ fn execute_payload(block: &Block, context: &RpcApiContext) -> Result<PayloadStatus, RpcErr> Ok(PayloadStatus::syncing()), // Under the current implementation this is not possible: we always calculate the state @@ -430,7 +430,7 @@ fn execute_payload(block: &Block, context: &RpcApiContext) -> Result<PayloadStatus, RpcErr> { - debug!("Block with hash {block_hash} executed and added to storage succesfully"); + info!("Block with hash {block_hash} executed and added to storage succesfully"); Ok(PayloadStatus::valid_with_hash(block_hash)) } } @@ -463,7 +463,7 @@ fn get_payload( payload_id: u64, context: &RpcApiContext, ) -> Result<(Block, U256, BlobsBundle, bool), RpcErr> { - debug!("Requested payload with id: {:#018x}", payload_id); + info!("Requested payload with id: {:#018x}", payload_id); let payload = context.storage.get_payload(payload_id)?; let Some((payload_block, block_value, blobs_bundle, completed)) = payload else { diff --git a/crates/networking/rpc/eth/block.rs b/crates/networking/rpc/eth/block.rs index 76a4cc3013..ad73070823 100644 --- a/crates/networking/rpc/eth/block.rs +++ b/crates/networking/rpc/eth/block.rs @@ -70,7 +70,7 @@ impl RpcHandler for GetBlockByNumberRequest { } fn handle(&self, context: RpcApiContext) ->
Result<Value, RpcErr> { let storage = &context.storage; - debug!("Requested block with number: {}", self.block); + info!("Requested block with number: {}", self.block); let block_number = match self.block.resolve_block_number(storage)? { Some(block_number) => block_number, _ => return Ok(Value::Null), diff --git a/crates/networking/rpc/eth/client.rs b/crates/networking/rpc/eth/client.rs index bcc3ef3349..5601fe239b 100644 --- a/crates/networking/rpc/eth/client.rs +++ b/crates/networking/rpc/eth/client.rs @@ -10,7 +10,7 @@ impl RpcHandler for ChainId { } fn handle(&self, context: RpcApiContext) -> Result<Value, RpcErr> { - debug!("Requested chain id"); + info!("Requested chain id"); let chain_spec = context .storage .get_chain_config() diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index f68fd3c5d7..cbd28f217e 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -144,9 +144,9 @@ pub async fn start_api( let filters = active_filters.clone(); loop { interval.tick().await; - tracing::debug!("Running filter clean task"); + tracing::info!("Running filter clean task"); filter::clean_outdated_filters(filters.clone(), FILTER_DURATION); - tracing::debug!("Filter clean task complete"); + tracing::info!("Filter clean task complete"); } }); From 7d956ef8e1f4824ef2d006a2c6673d972096777f Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 12:38:49 -0300 Subject: [PATCH 163/189] Doc new storage methods --- crates/storage/store/engines/api.rs | 52 ++++++++++++++++++----------- crates/storage/store/storage.rs | 11 +++++- 2 files changed, 42 insertions(+), 21 deletions(-) diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs index c146b2ea16..168cb980c6 100644 --- a/crates/storage/store/engines/api.rs +++ b/crates/storage/store/engines/api.rs @@ -175,61 +175,61 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { /// Returns the stored chain configuration fn get_chain_config(&self) -> Result<ChainConfig, StoreError>; - // Update earliest block number + /// Update earliest block number fn update_earliest_block_number(&self, block_number: BlockNumber) -> Result<(), StoreError>; - // Obtain earliest block number + /// Obtain earliest block number fn get_earliest_block_number(&self) -> Result<Option<BlockNumber>, StoreError>; - // Update finalized block number + /// Update finalized block number fn update_finalized_block_number(&self, block_number: BlockNumber) -> Result<(), StoreError>; - // Obtain finalized block number + /// Obtain finalized block number fn get_finalized_block_number(&self) -> Result<Option<BlockNumber>, StoreError>; - // Update safe block number + /// Update safe block number fn update_safe_block_number(&self, block_number: BlockNumber) -> Result<(), StoreError>; - // Obtain safe block number + /// Obtain safe block number fn get_safe_block_number(&self) -> Result<Option<BlockNumber>, StoreError>; - // Update latest block number + /// Update latest block number fn update_latest_block_number(&self, block_number: BlockNumber) -> Result<(), StoreError>; - // Obtain latest block number + /// Obtain latest block number fn get_latest_block_number(&self) -> Result<Option<BlockNumber>, StoreError>; // TODO (#307): Remove TotalDifficulty. - // Update latest total difficulty + /// Update latest total difficulty fn update_latest_total_difficulty( &self, latest_total_difficulty: U256, ) -> Result<(), StoreError>; // TODO (#307): Remove TotalDifficulty.
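The snap-state methods documented further down in this patch come in set/get/clear triples that bracket a sync cycle: a cycle writes checkpoints as it goes, the next cycle reads them to resume, and a completed sync clears them. A minimal sketch of the intended round-trip through the Store wrapper; the resume_head helper is hypothetical, and StoreError is assumed to be importable from ethrex_storage alongside Store:

    use ethrex_core::types::BlockHash;
    use ethrex_storage::{Store, StoreError};

    // Hypothetical helper: pick the header-download starting point for a new
    // sync cycle, resuming from the checkpoint an aborted snap sync left behind.
    fn resume_head(store: &Store, current_head: BlockHash) -> Result<BlockHash, StoreError> {
        match store.get_header_download_checkpoint()? {
            Some(checkpoint) => Ok(checkpoint), // resume where the last cycle stopped
            None => Ok(current_head),           // fresh sync, start from our own head
        }
    }
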
- // Obtain latest total difficulty + /// Obtain latest total difficulty fn get_latest_total_difficulty(&self) -> Result<Option<U256>, StoreError>; - // Update pending block number + /// Update pending block number fn update_pending_block_number(&self, block_number: BlockNumber) -> Result<(), StoreError>; - // Obtain pending block number + /// Obtain pending block number fn get_pending_block_number(&self) -> Result<Option<BlockNumber>, StoreError>; - // Obtain a storage trie from the given address and storage_root - // Doesn't check if the account is stored - // Used for internal store operations + /// Obtain a storage trie from the given address and storage_root + /// Doesn't check if the account is stored + /// Used for internal store operations fn open_storage_trie(&self, hashed_address: H256, storage_root: H256) -> Trie; - // Obtain a state trie from the given state root - // Doesn't check if the state root is valid - // Used for internal store operations + /// Obtain a state trie from the given state root + /// Doesn't check if the state root is valid + /// Used for internal store operations fn open_state_trie(&self, state_root: H256) -> Trie; - // Set the canonical block hash for a given block number. + /// Set the canonical block hash for a given block number. fn set_canonical_block(&self, number: BlockNumber, hash: BlockHash) -> Result<(), StoreError>; - // Unsets canonical block for a block number. + /// Unsets canonical block for a block number. fn unset_canonical_block(&self, number: BlockNumber) -> Result<(), StoreError>; fn add_payload(&self, payload_id: u64, block: Block) -> Result<(), StoreError>; @@ -252,27 +252,39 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { // Snap State methods + /// Sets the hash of the last header downloaded during a snap sync fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError>; + /// Gets the hash of the last header downloaded during a snap sync fn get_header_download_checkpoint(&self) -> Result<Option<BlockHash>, StoreError>; + /// Clears the hash of the last header downloaded during a snap sync fn clear_header_download_checkpoint(&self) -> Result<(), StoreError>; + /// Sets the current state root of the state trie being rebuilt during snap sync fn set_state_trie_root_checkpoint(&self, current_root: H256) -> Result<(), StoreError>; + /// Gets the current state root of the state trie being rebuilt during snap sync fn get_state_trie_root_checkpoint(&self) -> Result<Option<H256>, StoreError>; + /// Clears the current state root of the state trie being rebuilt during snap sync fn clear_state_trie_root_checkpoint(&self) -> Result<(), StoreError>; + /// Sets the last key fetched from the state trie being fetched during snap sync fn set_state_trie_key_checkpoint(&self, last_key: H256) -> Result<(), StoreError>; + /// Gets the last key fetched from the state trie being fetched during snap sync fn get_state_trie_key_checkpoint(&self) -> Result<Option<H256>, StoreError>; + /// Clears the last key fetched from the state trie being fetched during snap sync fn clear_state_trie_key_checkpoint(&self) -> Result<(), StoreError>; + /// Sets the list of account hashes whose storage needs healing fn set_pending_storage_heal_accounts(&self, accounts: Vec<H256>) -> Result<(), StoreError>; + /// Gets the list of account hashes whose storage needs healing fn get_pending_storage_heal_accounts(&self) -> Result<Option<Vec<H256>>, StoreError>; + /// Clears the list of account hashes whose storage needs healing fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError>; } diff --git
a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs index e4dafb9901..33644f43c2 100644 --- a/crates/storage/store/storage.rs +++ b/crates/storage/store/storage.rs @@ -1000,40 +1000,49 @@ impl Store { .is_some()) } + /// Sets the hash of the last header downloaded during a snap sync pub fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError> { self.engine.set_header_download_checkpoint(block_hash) } + /// Gets the hash of the last header downloaded during a snap sync pub fn get_header_download_checkpoint(&self) -> Result<Option<BlockHash>, StoreError> { self.engine.get_header_download_checkpoint() } + /// Sets the current state root of the state trie being rebuilt during snap sync pub fn set_state_trie_root_checkpoint(&self, current_root: H256) -> Result<(), StoreError> { self.engine.set_state_trie_root_checkpoint(current_root) } + /// Gets the current state root of the state trie being rebuilt during snap sync pub fn get_state_trie_root_checkpoint(&self) -> Result<Option<H256>, StoreError> { self.engine.get_state_trie_root_checkpoint() } + /// Sets the last key fetched from the state trie being fetched during snap sync pub fn set_state_trie_key_checkpoint(&self, last_key: H256) -> Result<(), StoreError> { self.engine.set_state_trie_key_checkpoint(last_key) } + /// Gets the last key fetched from the state trie being fetched during snap sync pub fn get_state_trie_key_checkpoint(&self) -> Result<Option<H256>, StoreError> { self.engine.get_state_trie_key_checkpoint() } + /// Sets the list of account hashes whose storage needs healing pub fn set_pending_storage_heal_accounts(&self, accounts: Vec<H256>) -> Result<(), StoreError> { self.engine.set_pending_storage_heal_accounts(accounts) } + /// Gets the list of account hashes whose storage needs healing pub fn get_pending_storage_heal_accounts(&self) -> Result<Option<Vec<H256>>, StoreError> { self.engine.get_pending_storage_heal_accounts() } + /// Clears all checkpoints written during a snap sync pub fn clear_snap_state(&self) -> Result<(), StoreError> { - //self.engine.clear_header_download_checkpoint()?; TODO: Uncomment + self.engine.clear_header_download_checkpoint()?; self.engine.clear_pending_storage_heal_accounts()?; self.engine.clear_state_trie_root_checkpoint()?; self.engine.clear_state_trie_key_checkpoint() From fd5791e3485828daf1aef5a2f5c37ea077193349 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 14:11:52 -0300 Subject: [PATCH 164/189] Fix potential unwrap --- crates/networking/p2p/sync.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index a62e20ac6b..b080bbe374 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -132,14 +132,14 @@ impl SyncManager { .iter() .map(|header| header.compute_block_hash()) .collect::<Vec<_>>(); - // Discard the first header as we already have it - block_hashes.remove(0); - block_headers.remove(0); // Check if we already found the sync head let sync_head_found = block_hashes.contains(&sync_head); // Update current fetch head if needed if !sync_head_found { current_head = *block_hashes.last().unwrap(); + // Discard the first header as we already have it + block_hashes.remove(0); + block_headers.remove(0); // Update snap state store.set_header_download_checkpoint(current_head)?; } From 5eaac44a588d5bd17085b288834be28db0699ecd Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 14:17:39 -0300 Subject: [PATCH 165/189] Fix conditional --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1
insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index b080bbe374..b6e530e87b 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -442,7 +442,7 @@ async fn rebuild_state_trie( ); store.set_pending_storage_heal_accounts(stored_pending_storages)?; } - if retry_count > MAX_RETRIES || !pending_storages { + if retry_count > MAX_RETRIES || pending_storages { // Skip healing and return stale status return Ok(false); } From 707b760833b6b2189f529592011e0ad6d44fc0a1 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 15:05:05 -0300 Subject: [PATCH 166/189] Fix logic --- crates/networking/p2p/sync.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index b6e530e87b..fb07250be6 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -137,12 +137,12 @@ impl SyncManager { // Update current fetch head if needed if !sync_head_found { current_head = *block_hashes.last().unwrap(); - // Discard the first header as we already have it - block_hashes.remove(0); - block_headers.remove(0); // Update snap state store.set_header_download_checkpoint(current_head)?; } + // Discard the first header as we already have it + block_hashes.remove(0); + block_headers.remove(0); // Store headers and save hashes for full block retrieval all_block_hashes.extend_from_slice(&block_hashes[..]); store.add_block_headers(block_hashes, block_headers.clone())?; From 0a1bc341cf5d3b5d3cb8ec88c71889f8aa44679e Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 16:44:52 -0300 Subject: [PATCH 167/189] clippy --- crates/networking/p2p/rlpx/eth/backend.rs | 1 - crates/networking/p2p/rlpx/handshake.rs | 1 - crates/networking/rpc/eth/block.rs | 2 +- crates/networking/rpc/eth/client.rs | 2 +- 4 files changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/networking/p2p/rlpx/eth/backend.rs b/crates/networking/p2p/rlpx/eth/backend.rs index 171ebf66f7..7526ba696f 100644 --- a/crates/networking/p2p/rlpx/eth/backend.rs +++ b/crates/networking/p2p/rlpx/eth/backend.rs @@ -1,6 +1,5 @@ use ethrex_core::{types::ForkId, U256}; use ethrex_storage::Store; -use tracing::warn; use crate::rlpx::error::RLPxError; diff --git a/crates/networking/p2p/rlpx/handshake.rs b/crates/networking/p2p/rlpx/handshake.rs index 0d5b0685aa..33ba010de7 100644 --- a/crates/networking/p2p/rlpx/handshake.rs +++ b/crates/networking/p2p/rlpx/handshake.rs @@ -13,7 +13,6 @@ use k256::{ PublicKey, SecretKey, }; use rand::Rng; -use tracing::debug; use super::error::RLPxError; diff --git a/crates/networking/rpc/eth/block.rs b/crates/networking/rpc/eth/block.rs index ad73070823..b226a390f5 100644 --- a/crates/networking/rpc/eth/block.rs +++ b/crates/networking/rpc/eth/block.rs @@ -1,7 +1,7 @@ use ethrex_blockchain::find_parent_header; use ethrex_rlp::encode::RLPEncode; use serde_json::Value; -use tracing::{debug, info}; +use tracing::info; use crate::{ types::{ diff --git a/crates/networking/rpc/eth/client.rs b/crates/networking/rpc/eth/client.rs index 5601fe239b..f090b0c701 100644 --- a/crates/networking/rpc/eth/client.rs +++ b/crates/networking/rpc/eth/client.rs @@ -1,5 +1,5 @@ use serde_json::Value; -use tracing::{debug, info}; +use tracing::info; use crate::{utils::RpcErr, RpcApiContext, RpcHandler}; From 18347d0e299d852963f839dbd5ac52508f4827f4 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 21 Jan 2025 18:55:58 -0300 Subject: [PATCH 168/189] clippy --- 
crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index f44896eae4..ba878cc316 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -837,7 +837,7 @@ async fn storage_healer( stale |= is_stale; } } - Ok(pending_storages.into_iter().map(|(h, _)| h).collect()) + Ok(pending_storages.into_keys().collect()) } /// Receives a set of storage trie paths (grouped by their corresponding account's state trie path), From 3094fc57edece89a074983ed7a9a98e3e9f83fa0 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 12:00:33 -0300 Subject: [PATCH 169/189] Fix logic --- crates/networking/rpc/rpc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index cbd28f217e..2273e52fd1 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -88,7 +88,7 @@ impl RpcApiContext { /// Returns the engine's current sync status, see [SyncStatus] pub fn sync_status(&self) -> Result<SyncStatus, RpcErr> { // Try to get hold of the sync manager, if we can't then it means it is currently involved in a sync process - Ok(if self.syncer.try_lock().is_ok() { + Ok(if self.syncer.try_lock().is_err() { SyncStatus::Active // Check if there is a checkpoint left from a previous aborted sync } else if self.storage.get_header_download_checkpoint()?.is_some() { From ae46ef5ec8945029154e3f57fc84997743aa9950 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 15:19:47 -0300 Subject: [PATCH 170/189] Simplify pivot selection + fix post-pivot exec --- crates/networking/p2p/sync.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index ba878cc316..2fc0829be3 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -161,11 +161,7 @@ impl SyncManager { // - Fetch each block's body and its receipt via eth p2p requests // - Fetch the pivot block's state via snap p2p requests // - Execute blocks after the pivot (like in full-sync) - let pivot_idx = if all_block_hashes.len() > MIN_FULL_BLOCKS { - all_block_hashes.len() - MIN_FULL_BLOCKS - } else { - all_block_hashes.len() - 1 - }; + let pivot_idx = all_block_hashes.len().checked_sub(MIN_FULL_BLOCKS).unwrap_or_default(); let pivot_header = store .get_block_header_by_hash(all_block_hashes[pivot_idx])? .ok_or(SyncError::CorruptDB)?; @@ -174,7 +170,7 @@ debug!( "Selected block {} as pivot for snap sync", pivot_header.number ); let store_bodies_handle = tokio::spawn(store_block_bodies( - all_block_hashes[pivot_idx..].to_vec(), + all_block_hashes[pivot_idx+1..].to_vec(), self.peers.clone(), store.clone(), )); @@ -189,7 +185,8 @@ // Wait for all bodies to be downloaded store_bodies_handle.await??; // For all blocks before the pivot: Store the bodies and fetch the receipts (TODO) // For all blocks after the pivot: Process them fully - for hash in &all_block_hashes[pivot_idx..] { + info!("Executing blocks past pivot"); + for hash in &all_block_hashes[pivot_idx+1..] { let block = store .get_block_by_hash(*hash)?
.ok_or(SyncError::CorruptDB)?; From f1e8c44781f914c4b21d611330ccf722e1001414 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 15:56:54 -0300 Subject: [PATCH 171/189] Switch to full-sync if we have very few blocks between sync head and latest head --- crates/networking/p2p/sync.rs | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 2fc0829be3..f7da149aee 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -132,8 +132,23 @@ impl SyncManager { // Update current fetch head if needed if !sync_head_found { current_head = *block_hashes.last().unwrap(); - // Update snap state - store.set_header_download_checkpoint(current_head)?; + } + if matches!(self.sync_mode, SyncMode::Snap) { + if !sync_head_found { + // Update snap state + store.set_header_download_checkpoint(current_head)?; + } else { + // If the sync head is less than 64 blocks away from our current head switch to full-sync + let last_header_number = block_headers.last().unwrap().number; + let latest_block_number = store.get_latest_block_number()?; + if last_header_number.saturating_sub(latest_block_number) + < MIN_FULL_BLOCKS as u64 + { + // Too few blocks for a snap sync, switching to full sync + store.clear_snap_state()?; + self.sync_mode = SyncMode::Full + } + } } // Discard the first header as we already have it block_hashes.remove(0); block_headers.remove(0); @@ -161,7 +176,10 @@ impl SyncManager { // - Fetch each block's body and its receipt via eth p2p requests // - Fetch the pivot block's state via snap p2p requests // - Execute blocks after the pivot (like in full-sync) - let pivot_idx = all_block_hashes .len() .checked_sub(MIN_FULL_BLOCKS) .unwrap_or_default(); + let pivot_idx = all_block_hashes + .len() + .checked_sub(MIN_FULL_BLOCKS) + .unwrap_or_default(); let pivot_header = store .get_block_header_by_hash(all_block_hashes[pivot_idx])? .ok_or(SyncError::CorruptDB)?; @@ -174,7 +170,7 @@ pivot_header.number ); let store_bodies_handle = tokio::spawn(store_block_bodies( - all_block_hashes[pivot_idx+1..].to_vec(), + all_block_hashes[pivot_idx + 1..].to_vec(), self.peers.clone(), store.clone(), )); @@ -186,7 +204,7 @@ // For all blocks before the pivot: Store the bodies and fetch the receipts (TODO) // For all blocks after the pivot: Process them fully - for hash in &all_block_hashes[pivot_idx+1..] { + for hash in &all_block_hashes[pivot_idx + 1..] { let block = store .get_block_by_hash(*hash)? From c7d4e9594377421f68b336f5529f7e760e946100 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 16:00:37 -0300 Subject: [PATCH 172/189] Remove debug line --- crates/networking/p2p/sync.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index f7da149aee..0f9cebc065 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -203,7 +203,6 @@ impl SyncManager { store_bodies_handle.await??; // For all blocks before the pivot: Store the bodies and fetch the receipts (TODO) // For all blocks after the pivot: Process them fully - info!("Executing blocks past pivot"); for hash in &all_block_hashes[pivot_idx + 1..] { let block = store .get_block_by_hash(*hash)?
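The pivot arithmetic that patch 170 introduces and patch 171 builds on is easy to sanity-check in isolation: checked_sub collapses the earlier if/else into one expression and saturates to index 0 when fewer than MIN_FULL_BLOCKS headers were fetched, instead of risking an underflow panic. A minimal self-contained sketch; the pivot_idx helper is hypothetical, only the constant comes from sync.rs:

    const MIN_FULL_BLOCKS: usize = 64;

    // Hypothetical standalone version of the pivot selection: the pivot sits
    // MIN_FULL_BLOCKS before the sync head, or at index 0 when we fetched
    // fewer headers than that.
    fn pivot_idx(total_headers: usize) -> usize {
        total_headers.checked_sub(MIN_FULL_BLOCKS).unwrap_or_default()
    }

    fn main() {
        assert_eq!(pivot_idx(100), 36); // blocks after index 36 run through full execution
        assert_eq!(pivot_idx(10), 0);   // saturates instead of underflowing
    }
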
From d7dec4e4c2ce5f3c0c01dedd9d3725c66384a0e2 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 16:42:15 -0300 Subject: [PATCH 173/189] Filter out storages without paths left to fetch --- crates/networking/p2p/sync.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 0f9cebc065..7ba126775c 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -895,6 +895,8 @@ async fn heal_storage_batch( } } // Return remaining and added paths to be added to the queue + // Filter out the storages we completely fetched + batch.retain(|_, v|!v.1.is_empty()); return Ok((batch, false)); } } From f0c544c05445b24a5d07252fccf019bf4fb04ff5 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 17:05:31 -0300 Subject: [PATCH 174/189] fmt --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 7ba126775c..56a21bf859 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -896,7 +896,7 @@ async fn heal_storage_batch( } // Return remaining and added paths to be added to the queue // Filter out the storages we completely fetched - batch.retain(|_, v|!v.1.is_empty()); + batch.retain(|_, v| !v.1.is_empty()); return Ok((batch, false)); } } From ba683484daef8fe7f23f6d2bc1cae50404a0b568 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 17:19:41 -0300 Subject: [PATCH 175/189] Fix issue link --- crates/networking/p2p/rlpx/connection.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index f7cf99cc7e..de0f35ced3 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -320,7 +320,7 @@ impl RLPxConnection { tokio::select! 
{ // Expect a message from the remote peer message = self.receive() => { - // TODO: This is a hacky fix for the problem reported in https://github.com/lambdaclass/ethrex/issues/1685 + // TODO: This is a hacky fix for the problem reported in https://github.com/lambdaclass/ethrex/issues/1686 let res = self.handle_message(message?, sender.clone()).await; if let Err(err) = res { warn!("Handle message failed with {err:?}"); From 6afac6bbe65bce2ce4184f28c6bdc0aaac86995b Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 17:25:36 -0300 Subject: [PATCH 176/189] Amend diffs --- crates/networking/p2p/rlpx/connection.rs | 4 ++-- crates/networking/p2p/sync.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index de0f35ced3..7d714f7f83 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -215,11 +215,11 @@ impl RLPxConnection { reason: self.match_disconnect_reason(&error), })) .await - .unwrap_or_else(|e| debug!("Could not send Disconnect message: ({e}).")); + .unwrap_or_else(|e| error!("Could not send Disconnect message: ({e}).")); // Discard peer from kademlia table let remote_node_id = self.remote_node_id; - debug!("{error_text}: ({error}), discarding peer {remote_node_id}"); + error!("{error_text}: ({error}), discarding peer {remote_node_id}"); table.lock().await.replace_peer(remote_node_id); } diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 56a21bf859..eedf2b5312 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -155,7 +155,7 @@ impl SyncManager { block_headers.remove(0); // Store headers and save hashes for full block retrieval all_block_hashes.extend_from_slice(&block_hashes[..]); - store.add_block_headers(block_hashes, block_headers.clone())?; + store.add_block_headers(block_hashes, block_headers)?; if sync_head_found { // No more headers to request From a93e847b4f5325a428df215859d7d13e9ee449c5 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 17:35:58 -0300 Subject: [PATCH 177/189] Prioritize reaching leaves on state trie heal + increase batch size for node requests --- crates/networking/p2p/sync.rs | 22 +++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index eedf2b5312..c90d378c5b 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -26,6 +26,8 @@ const MAX_RETRIES: usize = 5; const MIN_FULL_BLOCKS: usize = 64; /// Max size of a bach to stat a fetch request in queues const BATCH_SIZE: usize = 300; +/// Max size of a batch to start a fetch request in queues for nodes +const NODE_BATCH_SIZE: usize = 900; #[derive(Debug)] pub enum SyncMode { @@ -546,16 +548,18 @@ async fn storage_fetcher( // or if we have no more incoming batches, spawn a fetch process // If the pivot became stale don't process anything and just save incoming requests while !stale - && (pending_storage.len() >= BATCH_SIZE || !incoming && !pending_storage.is_empty()) + && (pending_storage.len() >= NODE_BATCH_SIZE + || !incoming && !pending_storage.is_empty()) { // We will be spawning multiple tasks and then collecting their results // This uses a loop inside the main loop as the result from these tasks may lead to more values in queue let mut storage_tasks = tokio::task::JoinSet::new(); while !stale - && (pending_storage.len() >= BATCH_SIZE || !incoming &&
!pending_storage.is_empty()) + && (pending_storage.len() >= NODE_BATCH_SIZE + || !incoming && !pending_storage.is_empty()) { let next_batch = pending_storage - .drain(..BATCH_SIZE.min(pending_storage.len())) + .drain(..NODE_BATCH_SIZE.min(pending_storage.len())) .collect::<Vec<_>>(); storage_tasks.spawn(fetch_storage_batch( next_batch.clone(), @@ -733,11 +737,15 @@ async fn heal_state_trie( // Count the number of request retries so we don't get stuck requesting old state let mut retry_count = 0; while !paths.is_empty() && retry_count < MAX_RETRIES { + let batch = if paths.len() <= NODE_BATCH_SIZE { + paths.clone() + } else { + // Take the latest paths first so we prioritize reaching leaves (depth-first search) + paths[paths.len() - NODE_BATCH_SIZE..].to_vec() + }; let peer = get_peer_channel_with_retry(peers.clone(), Capability::Snap).await; - if let Some(nodes) = peer - .request_state_trienodes(state_root, paths.clone()) - .await - { + if let Some(nodes) = peer.request_state_trienodes(state_root, batch).await { + debug!("Received {} state nodes", nodes.len()); // Reset retry counter for next request retry_count = 0; let mut hahsed_addresses = vec![]; From d4e299d20e2e6a808fa7892c6b60e3efbe20e5ad Mon Sep 17 00:00:00 2001 From: fmoletta Date: Wed, 22 Jan 2025 17:52:24 -0300 Subject: [PATCH 178/189] Fix --- crates/networking/p2p/net.rs | 12 ++++++++++++ crates/networking/p2p/sync.rs | 18 ++++++++++-------- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs index 03849750e9..9873d095c0 100644 --- a/crates/networking/p2p/net.rs +++ b/crates/networking/p2p/net.rs @@ -413,6 +413,7 @@ async fn peers_revalidation( // first check that the peers we ping have responded for node_id in previously_pinged_peers { let mut table = table.lock().await; +<<<<<<< HEAD if let Some(peer) = table.get_by_node_id_mut(node_id) { if let Some(has_answered) = peer.revalidation { if has_answered { @@ -420,6 +421,17 @@ async fn peers_revalidation( } else { peer.decrement_liveness(); } +======= + let Some(peer) = table.get_by_node_id_mut(node_id) else { + continue; + }; + + if let Some(has_answered) = peer.revalidation { + if has_answered { + peer.increment_liveness(); + } else { + peer.decrement_liveness(); +>>>>>>> 384f1b2c (Fix) } peer.revalidation = None; diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index c90d378c5b..23eaae09b1 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -737,14 +737,15 @@ async fn heal_state_trie( // Count the number of request retries so we don't get stuck requesting old state let mut retry_count = 0; while !paths.is_empty() && retry_count < MAX_RETRIES { - let batch = if paths.len() <= NODE_BATCH_SIZE { - paths.clone() + let batch: Vec<Nibbles> = if paths.len() <= NODE_BATCH_SIZE { + paths.drain(..) } else { // Take the latest paths first so we prioritize reaching leaves (depth-first search) - paths[paths.len() - NODE_BATCH_SIZE..].to_vec() + paths.drain(paths.len() - NODE_BATCH_SIZE..)
+            //paths[paths.len() - NODE_BATCH_SIZE..].to_vec()
+        }.collect();
-        };
-        let peer = get_peer_channel_with_retry(peers.clone(), Capability::Snap).await;
-        if let Some(nodes) = peer.request_state_trienodes(state_root, batch).await {
+        let peer = peers.lock().await.get_peer_channels(Capability::Snap).await;
+        if let Some(nodes) = peer.request_state_trienodes(state_root, batch.clone()).await {
             debug!("Received {} state nodes", nodes.len());
             // Reset retry counter for next request
             retry_count = 0;
@@ -754,10 +755,11 @@ async fn heal_state_trie(
             // - Add its children to the queue (if we don't have them already)
             // - If it is a leaf, request its bytecode & storage
             // - If it is a leaf, add its path & value to the trie
-            for node in nodes {
+            // Add unfetched nodes back to the queue (we do this first to ensure depth-focused fetching)
+            paths.extend_from_slice(&batch[nodes.len()..]);
+            for (node, path) in nodes.into_iter().zip(batch.into_iter()) {
                 // We cannot keep the trie state open
                 let mut trie = store.open_state_trie(current_root);
-                let path = paths.remove(0);
                 paths.extend(node_missing_children(&node, &path, trie.state())?);
                 if let Node::Leaf(node) = &node {
                     // Fetch bytecode & storage

From 3ead985fb162cba1ca4d21a4d5eff9dbc2d86cf1 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Wed, 22 Jan 2025 17:58:59 -0300
Subject: [PATCH 179/189] Cleaning

---
 crates/networking/p2p/sync.rs | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index 23eaae09b1..e1adc17513 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -737,13 +737,8 @@ async fn heal_state_trie(
     // Count the number of request retries so we don't get stuck requesting old state
     let mut retry_count = 0;
     while !paths.is_empty() && retry_count < MAX_RETRIES {
-        let batch: Vec<_> = if paths.len() <= NODE_BATCH_SIZE {
-            paths.drain(..)
-        } else {
-            // Take the latest paths first so we prioritize reaching leaves (depth search)
-            paths.drain(paths.len() - NODE_BATCH_SIZE..) 
-            //paths[paths.len() - NODE_BATCH_SIZE..].to_vec()
-        }.collect();
+        // Fetch the latest paths first to prioritize reaching leaves as soon as possible
+        let batch: Vec<_> = paths.drain(paths.len().checked_sub(NODE_BATCH_SIZE).unwrap_or_default()..).collect();
         let peer = peers.lock().await.get_peer_channels(Capability::Snap).await;
         if let Some(nodes) = peer.request_state_trienodes(state_root, batch.clone()).await {
             debug!("Received {} state nodes", nodes.len());

From e08cc0a6422111eb960f61e91486a12be4c03c75 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Wed, 22 Jan 2025 18:25:06 -0300
Subject: [PATCH 180/189] Fix cherry pick conflicts

---
 crates/networking/p2p/net.rs  | 12 ------------
 crates/networking/p2p/sync.rs |  2 +-
 2 files changed, 1 insertion(+), 13 deletions(-)

diff --git a/crates/networking/p2p/net.rs b/crates/networking/p2p/net.rs
index 9873d095c0..03849750e9 100644
--- a/crates/networking/p2p/net.rs
+++ b/crates/networking/p2p/net.rs
@@ -413,7 +413,6 @@ async fn peers_revalidation(
     // first check that the peers we ping have responded
     for node_id in previously_pinged_peers {
         let mut table = table.lock().await;
-<<<<<<< HEAD
         if let Some(peer) = table.get_by_node_id_mut(node_id) {
             if let Some(has_answered) = peer.revalidation {
                 if has_answered {
@@ -421,17 +420,6 @@ async fn peers_revalidation(
                 } else {
                     peer.decrement_liveness();
                 }
-=======
-        let Some(peer) = table.get_by_node_id_mut(node_id) else {
-            continue;
-        };
-
-        if let Some(has_answered) = peer.revalidation {
-            if has_answered {
-                peer.increment_liveness();
-            } else {
-                peer.decrement_liveness();
->>>>>>> 384f1b2c (Fix)
             }
             peer.revalidation = None;

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index e1adc17513..dd25f578db 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -739,7 +739,7 @@ async fn heal_state_trie(
     while !paths.is_empty() && retry_count < MAX_RETRIES {
         // Fetch the latest paths first to prioritize reaching leaves as soon as possible
         let batch: Vec<_> = paths.drain(paths.len().checked_sub(NODE_BATCH_SIZE).unwrap_or_default()..).collect();
-        let peer = peers.lock().await.get_peer_channels(Capability::Snap).await;
+        let peer = get_peer_channel_with_retry(peers.clone(), Capability::Snap).await;
         if let Some(nodes) = peer.request_state_trienodes(state_root, batch.clone()).await {
             debug!("Received {} state nodes", nodes.len());
             // Reset retry counter for next request

From 13c0a1df2e6648f81371821d2ebd37d558db1406 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Wed, 22 Jan 2025 18:53:12 -0300
Subject: [PATCH 181/189] Fix leftover conflicts

---
 crates/storage/store/engines/redb.rs | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/crates/storage/store/engines/redb.rs b/crates/storage/store/engines/redb.rs
index 83b781f9d4..6b6124dc37 100644
--- a/crates/storage/store/engines/redb.rs
+++ b/crates/storage/store/engines/redb.rs
@@ -701,7 +701,6 @@ impl StoreEngine for RedBStore {
             .collect())
     }

-<<<<<<< HEAD
     fn set_header_download_checkpoint(&self, block_hash: BlockHash) -> Result<(), StoreError> {
         self.write(
             SNAP_STATE_TABLE,
@@ -777,7 +776,7 @@ impl StoreEngine for RedBStore {
     fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> {
         self.delete(SNAP_STATE_TABLE, SnapStateIndex::PendingStorageHealAccounts)
     }
-=======
+
     fn is_synced(&self) -> Result {
         match self.read(CHAIN_DATA_TABLE, ChainDataIndex::IsSynced)? 
{
            None => Err(StoreError::Custom("Sync status not found".to_string())),
@@ -792,7 +791,6 @@ impl StoreEngine for RedBStore {
             status.encode_to_vec(),
         )
     }
->>>>>>> 042c24f77abf1d4950378b837ed1583370ec17cc
 }

 impl redb::Value for ChainDataIndex {

From adc4df77ed8bb08b299ab1195bb5a8319d706559 Mon Sep 17 00:00:00 2001
From: fmoletta
Date: Thu, 23 Jan 2025 10:45:15 -0300
Subject: [PATCH 182/189] Use saturating sub

---
 crates/networking/p2p/sync.rs           | 14 ++++++++------
 crates/storage/store/engines/api.rs     |  2 +-
 crates/storage/store/engines/libmdbx.rs |  2 +-
 crates/storage/store/engines/redb.rs    |  2 +-
 crates/storage/store/storage.rs         |  2 +-
 5 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs
index dd25f578db..3b0497c38c 100644
--- a/crates/networking/p2p/sync.rs
+++ b/crates/networking/p2p/sync.rs
@@ -178,10 +178,7 @@ impl SyncManager {
                 // - Fetch each block's body and its receipt via eth p2p requests
                 // - Fetch the pivot block's state via snap p2p requests
                 // - Execute blocks after the pivot (like in full-sync)
-                let pivot_idx = all_block_hashes
-                    .len()
-                    .checked_sub(MIN_FULL_BLOCKS)
-                    .unwrap_or_default();
+                let pivot_idx = all_block_hashes.len().saturating_sub(MIN_FULL_BLOCKS);
                 let pivot_header = store
                     .get_block_header_by_hash(all_block_hashes[pivot_idx])?
                     .ok_or(SyncError::CorruptDB)?;
@@ -738,9 +735,14 @@ async fn heal_state_trie(
     let mut retry_count = 0;
     while !paths.is_empty() && retry_count < MAX_RETRIES {
         // Fetch the latest paths first to prioritize reaching leaves as soon as possible
-        let batch: Vec<_> = paths.drain(paths.len().checked_sub(NODE_BATCH_SIZE).unwrap_or_default()..).collect();
+        let batch: Vec<_> = paths
+            .drain(paths.len().saturating_sub(NODE_BATCH_SIZE)..)
+            .collect();
         let peer = get_peer_channel_with_retry(peers.clone(), Capability::Snap).await;
-        if let Some(nodes) = peer.request_state_trienodes(state_root, batch.clone()).await {
+        if let Some(nodes) = peer
+            .request_state_trienodes(state_root, batch.clone())
+            .await
+        {
             debug!("Received {} state nodes", nodes.len());
             // Reset retry counter for next request
             retry_count = 0;

diff --git a/crates/storage/store/engines/api.rs b/crates/storage/store/engines/api.rs
index 52ed85d599..aca3c05fbe 100644
--- a/crates/storage/store/engines/api.rs
+++ b/crates/storage/store/engines/api.rs
@@ -287,7 +287,7 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe {
     /// Clears the list of account hashes whose storage needs healing
     fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError>;
-    
+
     fn is_synced(&self) -> Result;

     fn update_sync_status(&self, status: bool) -> Result<(), StoreError>;

diff --git a/crates/storage/store/engines/libmdbx.rs b/crates/storage/store/engines/libmdbx.rs
index 4d62f04119..703989d3c2 100644
--- a/crates/storage/store/engines/libmdbx.rs
+++ b/crates/storage/store/engines/libmdbx.rs
@@ -597,7 +597,7 @@ impl StoreEngine for Store {
     fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> {
         self.delete::(SnapStateIndex::PendingStorageHealAccounts)
     }
-    
+
    fn is_synced(&self) -> Result {
        match self.read::(ChainDataIndex::IsSynced)? 
{ None => Err(StoreError::Custom("Sync status not found".to_string())), diff --git a/crates/storage/store/engines/redb.rs b/crates/storage/store/engines/redb.rs index 6b6124dc37..c19f8c6dd5 100644 --- a/crates/storage/store/engines/redb.rs +++ b/crates/storage/store/engines/redb.rs @@ -776,7 +776,7 @@ impl StoreEngine for RedBStore { fn clear_pending_storage_heal_accounts(&self) -> Result<(), StoreError> { self.delete(SNAP_STATE_TABLE, SnapStateIndex::PendingStorageHealAccounts) } - + fn is_synced(&self) -> Result { match self.read(CHAIN_DATA_TABLE, ChainDataIndex::IsSynced)? { None => Err(StoreError::Custom("Sync status not found".to_string())), diff --git a/crates/storage/store/storage.rs b/crates/storage/store/storage.rs index 5985e47141..da8863b59b 100644 --- a/crates/storage/store/storage.rs +++ b/crates/storage/store/storage.rs @@ -1050,7 +1050,7 @@ impl Store { self.engine.clear_state_trie_root_checkpoint()?; self.engine.clear_state_trie_key_checkpoint() } - + pub fn is_synced(&self) -> Result { self.engine.is_synced() } From e62df207fc5495017f5a42dc72ac78e40177c826 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 23 Jan 2025 12:23:28 -0300 Subject: [PATCH 183/189] Remove hacky fix --- crates/networking/p2p/rlpx/connection.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 6b146f2edf..20c986a846 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -320,11 +320,7 @@ impl RLPxConnection { tokio::select! { // Expect a message from the remote peer message = self.receive() => { - // TODO: This is a hacky fix for the problem reported in https://github.com/lambdaclass/ethrex/issues/1686 - let res = self.handle_message(message?, sender.clone()).await; - if let Err(err) = res { - warn!("Handle message failed with {err:?}"); - } + self.handle_message(message?, sender.clone()).await?; } // Expect a message from the backend Some(message) = receiver.recv() => { From 61261fcf97b30254f97a61e1a9d5af6629b35617 Mon Sep 17 00:00:00 2001 From: fmoletta <99273364+fmoletta@users.noreply.github.com> Date: Thu, 23 Jan 2025 15:54:44 -0300 Subject: [PATCH 184/189] remove unused import --- crates/networking/p2p/rlpx/connection.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/rlpx/connection.rs b/crates/networking/p2p/rlpx/connection.rs index 20c986a846..26b88dc16f 100644 --- a/crates/networking/p2p/rlpx/connection.rs +++ b/crates/networking/p2p/rlpx/connection.rs @@ -51,7 +51,7 @@ use tokio::{ }; use tokio_stream::StreamExt; use tokio_util::codec::Framed; -use tracing::{debug, error, warn}; +use tracing::{debug, error}; const CAP_P2P: (Capability, u8) = (Capability::P2p, 5); const CAP_ETH: (Capability, u8) = (Capability::Eth, 68); const CAP_SNAP: (Capability, u8) = (Capability::Snap, 1); From b08ff003a906df80713e26462ce435497de5a955 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Thu, 23 Jan 2025 17:15:01 -0300 Subject: [PATCH 185/189] Lower request byte limit --- crates/networking/p2p/peer_channels.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/peer_channels.rs b/crates/networking/p2p/peer_channels.rs index cbf5788cf6..aca46ef7c6 100644 --- a/crates/networking/p2p/peer_channels.rs +++ b/crates/networking/p2p/peer_channels.rs @@ -29,7 +29,7 @@ use crate::{ pub const PEER_REPLY_TIMOUT: Duration = Duration::from_secs(45); pub const MAX_MESSAGES_IN_PEER_CHANNEL: 
usize = 25; -pub const MAX_RESPONSE_BYTES: u64 = 2 * 1024 * 1024; +pub const MAX_RESPONSE_BYTES: u64 = 512 * 1024; pub const HASH_MAX: H256 = H256([0xFF; 32]); #[derive(Debug, Clone)] From 89ecc31fc84a2419a6085bb0f18f5c787b9b952c Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 27 Jan 2025 14:43:00 -0300 Subject: [PATCH 186/189] Update paris engine tests --- .github/workflows/ci_l1.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_l1.yaml b/.github/workflows/ci_l1.yaml index 29d0e5945d..98b99a17a2 100644 --- a/.github/workflows/ci_l1.yaml +++ b/.github/workflows/ci_l1.yaml @@ -175,7 +175,7 @@ jobs: test_pattern: "engine-cancun/Blob Transactions On Block 1|Blob Transaction Ordering, Single|Blob Transaction Ordering, Multiple Accounts|Replace Blob Transactions|Parallel Blob Transactions|ForkchoiceUpdated|GetPayload|NewPayloadV3 After Cancun|NewPayloadV3 Before Cancun|NewPayloadV3 Versioned Hashes|Incorrect BlobGasUsed|Bad Hash|ParentHash equals BlockHash|RPC:|in ForkchoiceState|Unknown|Invalid PayloadAttributes|Unique|Re-Execute Payload|In-Order Consecutive Payload|Multiple New Payloads|Valid NewPayload->|NewPayload with|Payload Build after|Build Payload with|Invalid Missing Ancestor ReOrg, StateRoot|Re-Org Back to|Re-org to Previously|Safe Re-Org to Side Chain|Transaction Re-Org|Re-Org Back into Canonical Chain, Depth=5|Suggested Fee Recipient Test|PrevRandao Opcode|Invalid NewPayload|Fork ID: Genesis=0|Fork ID: Genesis=1, Cancun=0|Fork ID: Genesis=1, Cancun=2 |Fork ID: Genesis=1, Cancun=2, BlocksBeforePeering=1|Fork ID: Genesis=1, Cancun=2, Shanghai=[^1]|Pre-Merge" - name: "Paris Engine tests" simulation: ethereum/engine - test_pattern: "engine-api/RPC|Re-Org Back to Canonical Chain From Syncing Chain|Re-org to Previously Validated Sidechain Payload|Re-Org Back into Canonical Chain, Depth=5|Safe Re-Org|Transaction Re-Org|Inconsistent|Suggested Fee|PrevRandao|Fork ID|Unknown|Invalid PayloadAttributes|Bad Hash|Unique Payload ID|Re-Execute Payload|In-Order|Multiple New Payloads|Valid NewPayload|NewPayload with|Invalid NewPayload|Payload Build|Invalid NewPayload, Transaction|ParentHash equals|Build Payload|Invalid Missing Ancestor ReOrg" + test_pattern: "engine-api/RPC|Re-org to Previously Validated Sidechain Payload|Re-Org Back into Canonical Chain, Depth=5|Safe Re-Org|Transaction Re-Org|Inconsistent|Suggested Fee|PrevRandao Opcode Transactions|Fork ID|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique Payload ID|Re-Execute Payload|Multiple New Payloads|NewPayload with|Payload Build|Build Payload" - name: "Engine withdrawal tests" simulation: ethereum/engine test_pattern: "engine-withdrawals/engine-withdrawals test loader|GetPayloadV2 Block Value|Sync after 2 blocks - Withdrawals on Genesis|Max Initcode Size|Pre-Merge Fork Number > 0|Empty Withdrawals|Corrupted Block Hash Payload|Withdrawals Fork on Block 2|Withdrawals Fork on Block 3|GetPayloadBodies" From d7e23a28cf8545d3f70c44bafacf0190337cb545 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 27 Jan 2025 17:33:37 -0300 Subject: [PATCH 187/189] Update cancun engine tests --- .github/workflows/ci_l1.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_l1.yaml b/.github/workflows/ci_l1.yaml index 98b99a17a2..6be1d1bc84 100644 --- a/.github/workflows/ci_l1.yaml +++ b/.github/workflows/ci_l1.yaml @@ -172,7 +172,7 @@ jobs: test_pattern: engine-(auth|exchange-capabilities)/ - name: "Cancun Engine tests" simulation: ethereum/engine - test_pattern: 
"engine-cancun/Blob Transactions On Block 1|Blob Transaction Ordering, Single|Blob Transaction Ordering, Multiple Accounts|Replace Blob Transactions|Parallel Blob Transactions|ForkchoiceUpdated|GetPayload|NewPayloadV3 After Cancun|NewPayloadV3 Before Cancun|NewPayloadV3 Versioned Hashes|Incorrect BlobGasUsed|Bad Hash|ParentHash equals BlockHash|RPC:|in ForkchoiceState|Unknown|Invalid PayloadAttributes|Unique|Re-Execute Payload|In-Order Consecutive Payload|Multiple New Payloads|Valid NewPayload->|NewPayload with|Payload Build after|Build Payload with|Invalid Missing Ancestor ReOrg, StateRoot|Re-Org Back to|Re-org to Previously|Safe Re-Org to Side Chain|Transaction Re-Org|Re-Org Back into Canonical Chain, Depth=5|Suggested Fee Recipient Test|PrevRandao Opcode|Invalid NewPayload|Fork ID: Genesis=0|Fork ID: Genesis=1, Cancun=0|Fork ID: Genesis=1, Cancun=2 |Fork ID: Genesis=1, Cancun=2, BlocksBeforePeering=1|Fork ID: Genesis=1, Cancun=2, Shanghai=[^1]|Pre-Merge" + test_pattern: "engine-cancun/Blob Transactions On Block 1|Blob Transaction Ordering, Single|Blob Transaction Ordering, Multiple Accounts|Replace Blob Transactions|Parallel Blob Transactions|ForkchoiceUpdated|GetPayload|NewPayloadV3 After Cancun|NewPayloadV3 Before Cancun|NewPayloadV3 Versioned Hashes|Incorrect BlobGasUsed|ParentHash equals BlockHash|RPC:|in ForkchoiceState|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique|Re-Execute Payload|Multiple New Payloads|NewPayload with|Payload Build after|Build Payload with|Re-org to Previously|Safe Re-Org to Side Chain|Transaction Re-Org|Re-Org Back into Canonical Chain, Depth=5|Suggested Fee Recipient Test|PrevRandao Opcode|Fork ID: Genesis=0|Fork ID: Genesis=1, Cancun=0|Fork ID: Genesis=1, Cancun=2 |Fork ID: Genesis=1, Cancun=2, BlocksBeforePeering=1|Fork ID: Genesis=1, Cancun=2, Shanghai=[^1]|Pre-Merge" - name: "Paris Engine tests" simulation: ethereum/engine test_pattern: "engine-api/RPC|Re-org to Previously Validated Sidechain Payload|Re-Org Back into Canonical Chain, Depth=5|Safe Re-Org|Transaction Re-Org|Inconsistent|Suggested Fee|PrevRandao Opcode Transactions|Fork ID|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique Payload ID|Re-Execute Payload|Multiple New Payloads|NewPayload with|Payload Build|Build Payload" From 7f5c42023f1252742b8b30801b70b6ba3e76b028 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Mon, 27 Jan 2025 18:26:12 -0300 Subject: [PATCH 188/189] Update cancun engine tests --- .github/workflows/ci_l1.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_l1.yaml b/.github/workflows/ci_l1.yaml index 6be1d1bc84..b5163390c3 100644 --- a/.github/workflows/ci_l1.yaml +++ b/.github/workflows/ci_l1.yaml @@ -172,7 +172,7 @@ jobs: test_pattern: engine-(auth|exchange-capabilities)/ - name: "Cancun Engine tests" simulation: ethereum/engine - test_pattern: "engine-cancun/Blob Transactions On Block 1|Blob Transaction Ordering, Single|Blob Transaction Ordering, Multiple Accounts|Replace Blob Transactions|Parallel Blob Transactions|ForkchoiceUpdated|GetPayload|NewPayloadV3 After Cancun|NewPayloadV3 Before Cancun|NewPayloadV3 Versioned Hashes|Incorrect BlobGasUsed|ParentHash equals BlockHash|RPC:|in ForkchoiceState|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique|Re-Execute Payload|Multiple New Payloads|NewPayload with|Payload Build after|Build Payload with|Re-org to Previously|Safe Re-Org to Side Chain|Transaction Re-Org|Re-Org Back into Canonical Chain, Depth=5|Suggested Fee Recipient Test|PrevRandao Opcode|Fork ID: Genesis=0|Fork 
ID: Genesis=1, Cancun=0|Fork ID: Genesis=1, Cancun=2 |Fork ID: Genesis=1, Cancun=2, BlocksBeforePeering=1|Fork ID: Genesis=1, Cancun=2, Shanghai=[^1]|Pre-Merge" + test_pattern: "engine-cancun/Blob Transactions On Block 1|Blob Transaction Ordering, Single|Blob Transaction Ordering, Multiple Accounts|Replace Blob Transactions|Parallel Blob Transactions|ForkchoiceUpdated|GetPayload|NewPayloadV3 After Cancun|NewPayloadV3 Before Cancun|NewPayloadV3 Versioned Hashes|Incorrect BlobGasUsed|ParentHash equals BlockHash|RPC:|in ForkchoiceState|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique|Re-Execute Payload|Multiple New Payloads|NewPayload with|Build Payload with|Re-org to Previously|Safe Re-Org to Side Chain|Transaction Re-Org|Re-Org Back into Canonical Chain, Depth=5|Suggested Fee Recipient Test|PrevRandao Opcode|Fork ID: Genesis=0|Fork ID: Genesis=1, Cancun=0|Fork ID: Genesis=1, Cancun=2 |Fork ID: Genesis=1, Cancun=2, BlocksBeforePeering=1|Fork ID: Genesis=1, Cancun=2, Shanghai=[^1]|Pre-Merge" - name: "Paris Engine tests" simulation: ethereum/engine test_pattern: "engine-api/RPC|Re-org to Previously Validated Sidechain Payload|Re-Org Back into Canonical Chain, Depth=5|Safe Re-Org|Transaction Re-Org|Inconsistent|Suggested Fee|PrevRandao Opcode Transactions|Fork ID|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique Payload ID|Re-Execute Payload|Multiple New Payloads|NewPayload with|Payload Build|Build Payload" From c0e4808bef4a0bcebf4c5fd7e7550dff7ef15c59 Mon Sep 17 00:00:00 2001 From: fmoletta Date: Tue, 28 Jan 2025 11:41:41 -0300 Subject: [PATCH 189/189] Remove elusive test --- .github/workflows/ci_l1.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_l1.yaml b/.github/workflows/ci_l1.yaml index b5163390c3..b29db214d3 100644 --- a/.github/workflows/ci_l1.yaml +++ b/.github/workflows/ci_l1.yaml @@ -172,7 +172,7 @@ jobs: test_pattern: engine-(auth|exchange-capabilities)/ - name: "Cancun Engine tests" simulation: ethereum/engine - test_pattern: "engine-cancun/Blob Transactions On Block 1|Blob Transaction Ordering, Single|Blob Transaction Ordering, Multiple Accounts|Replace Blob Transactions|Parallel Blob Transactions|ForkchoiceUpdated|GetPayload|NewPayloadV3 After Cancun|NewPayloadV3 Before Cancun|NewPayloadV3 Versioned Hashes|Incorrect BlobGasUsed|ParentHash equals BlockHash|RPC:|in ForkchoiceState|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique|Re-Execute Payload|Multiple New Payloads|NewPayload with|Build Payload with|Re-org to Previously|Safe Re-Org to Side Chain|Transaction Re-Org|Re-Org Back into Canonical Chain, Depth=5|Suggested Fee Recipient Test|PrevRandao Opcode|Fork ID: Genesis=0|Fork ID: Genesis=1, Cancun=0|Fork ID: Genesis=1, Cancun=2 |Fork ID: Genesis=1, Cancun=2, BlocksBeforePeering=1|Fork ID: Genesis=1, Cancun=2, Shanghai=[^1]|Pre-Merge" + test_pattern: "engine-cancun/Blob Transactions On Block 1|Blob Transaction Ordering, Single|Blob Transaction Ordering, Multiple Accounts|Replace Blob Transactions|Parallel Blob Transactions|ForkchoiceUpdatedV3|ForkchoiceUpdatedV2|ForkchoiceUpdated Version|GetPayload|NewPayloadV3 After Cancun|NewPayloadV3 Before Cancun|NewPayloadV3 Versioned Hashes|Incorrect BlobGasUsed|ParentHash equals BlockHash|RPC:|in ForkchoiceState|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique|Re-Execute Payload|Multiple New Payloads|NewPayload with|Build Payload with|Re-org to Previously|Safe Re-Org to Side Chain|Transaction Re-Org|Re-Org Back into Canonical Chain, Depth=5|Suggested Fee Recipient 
Test|PrevRandao Opcode|Fork ID: Genesis=0|Fork ID: Genesis=1, Cancun=0|Fork ID: Genesis=1, Cancun=2 |Fork ID: Genesis=1, Cancun=2, BlocksBeforePeering=1|Fork ID: Genesis=1, Cancun=2, Shanghai=[^1]|Pre-Merge" - name: "Paris Engine tests" simulation: ethereum/engine test_pattern: "engine-api/RPC|Re-org to Previously Validated Sidechain Payload|Re-Org Back into Canonical Chain, Depth=5|Safe Re-Org|Transaction Re-Org|Inconsistent|Suggested Fee|PrevRandao Opcode Transactions|Fork ID|Unknown SafeBlockHash|Unknown FinalizedBlockHash|Unique Payload ID|Re-Execute Payload|Multiple New Payloads|NewPayload with|Payload Build|Build Payload"
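
Notes on the techniques in this series follow.

The heal_state_trie changes in patches 177 through 182 converge on one scheme: keep a queue of trie paths, always hand the deepest (most recently queued) paths to a peer, and push whatever the peer did not answer straight back onto the tail. The following is a minimal, self-contained sketch of that scheme, not ethrex's actual code: the fetch closure stands in for the snap request, and plain strings stand in for trie paths, so all names and types here are illustrative.

const NODE_BATCH_SIZE: usize = 900;

/// One round of healing: drain a batch from the tail of the queue, ask a
/// peer for it, and re-queue whatever was not answered. `fetch` returns how
/// many of the requested paths the peer served (a prefix, in request order).
fn heal_round(paths: &mut Vec<String>, fetch: impl Fn(&[String]) -> usize) {
    // saturating_sub clamps the range start at 0 when fewer than
    // NODE_BATCH_SIZE paths are queued (patch 182's simplification of
    // checked_sub(..).unwrap_or_default()). Draining from the tail hands
    // out the most recently queued, i.e. deepest, paths first.
    let batch: Vec<String> = paths
        .drain(paths.len().saturating_sub(NODE_BATCH_SIZE)..)
        .collect();
    let answered = fetch(&batch);
    // Re-queue the unanswered tail before anything else, so the next round
    // keeps prioritizing depth (patch 178's "add unfetched nodes back to
    // the queue" step).
    paths.extend_from_slice(&batch[answered..]);
    for path in batch.into_iter().take(answered) {
        // The real loop zips each returned node with the path it was
        // requested for, queues the node's missing children, and commits
        // leaves to the trie; here we just consume the answered prefix.
        let _ = path;
    }
}

fn main() {
    let mut paths: Vec<String> = (0..5).map(|i| format!("path-{i}")).collect();
    // Pretend the peer only serves the first three requested paths.
    heal_round(&mut paths, |batch| batch.len().min(3));
    assert_eq!(paths.len(), 2); // the two unanswered paths were re-queued
}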
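Patch 182's "Use saturating sub" is a pure readability change: for unsigned integers, len.checked_sub(k).unwrap_or_default() and len.saturating_sub(k) always yield the same value, since the None case maps to zero. A quick equivalence check:

fn main() {
    for (len, k) in [(5usize, 3usize), (3, 5), (0, 64), (900, 900)] {
        let via_checked = len.checked_sub(k).unwrap_or_default(); // None -> 0
        let via_saturating = len.saturating_sub(k); // clamps at 0
        assert_eq!(via_checked, via_saturating);
    }
    println!("checked_sub(..).unwrap_or_default() == saturating_sub for usize");
}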
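The test_pattern strings edited in patches 186 through 189 are regular expressions that hive matches against test names: alternation with | opts tests in, and the character class in Shanghai=[^1] keeps every variant except Shanghai=1. A reduced illustration, assuming the regex crate as a dependency and using made-up test names rather than actual hive cases:

use regex::Regex;

fn main() {
    // A shortened slice of the Cancun pattern from patch 189.
    let pattern = Regex::new(
        "Unknown SafeBlockHash|Unknown FinalizedBlockHash|Fork ID: Genesis=1, Cancun=2, Shanghai=[^1]",
    )
    .unwrap();
    assert!(pattern.is_match("engine-cancun/Unknown SafeBlockHash"));
    // [^1] admits Shanghai=2 but rejects Shanghai=1:
    assert!(pattern.is_match("Fork ID: Genesis=1, Cancun=2, Shanghai=2"));
    assert!(!pattern.is_match("Fork ID: Genesis=1, Cancun=2, Shanghai=1"));
    // The broad "Unknown" alternative dropped in patch 186 would also have
    // matched a hypothetical name like this; the narrowed pattern does not:
    assert!(!pattern.is_match("engine-api/Unknown HeadBlockHash"));
}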