diff --git a/CHANGELOG.md b/CHANGELOG.md index 37827f3031..8c18579816 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ git # Madara Changelog ## Next release +- perf(verify_l2): parallelized l2 state root update +- perf(state_commitment): parallelized state commitment hash computations - fix(L1): fix l1 thread with battle tested implementation + removed l1-l2 - fix: update and store ConfigFetch in l2 sync(), chainId rpc call - fix: get_events paging with continuation_token diff --git a/Cargo.lock b/Cargo.lock index 1cf941b694..034dad6c47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2248,6 +2248,16 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-skiplist" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.18" @@ -5947,6 +5957,7 @@ dependencies = [ "bitvec", "blockifier", "bonsai-trie", + "crossbeam-skiplist", "env_logger", "ethers", "futures", diff --git a/Cargo.toml b/Cargo.toml index ae0366c1b3..17bdaf323d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -391,6 +391,7 @@ tracing = "0.1.37" tracing-subscriber = "0.3.16" url = "2.4.1" validator = "0.12" +crossbeam-skiplist = "0.1" [patch."https://github.com/w3f/ring-vrf"] bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf?rev=3ddc20", version = "0.0.4", rev = "3ddc20" } diff --git a/crates/client/db/src/bonsai_db.rs b/crates/client/db/src/bonsai_db.rs index 3c89dcb319..abe58a4f33 100644 --- a/crates/client/db/src/bonsai_db.rs +++ b/crates/client/db/src/bonsai_db.rs @@ -1,6 +1,4 @@ -use std::default; use std::marker::PhantomData; -use std::sync::atomic::AtomicU32; use std::sync::Arc; use bonsai_trie::id::Id; @@ -58,9 +56,9 @@ pub struct BonsaiDb { pub fn key_type(key: &DatabaseKey) -> KeyType { match key { - DatabaseKey::Trie(bytes) => return 
KeyType::Trie, - DatabaseKey::Flat(bytes) => return KeyType::Flat, - DatabaseKey::TrieLog(bytes) => return KeyType::TrieLog, + DatabaseKey::Trie(_) => return KeyType::Trie, + DatabaseKey::Flat(_) => return KeyType::Flat, + DatabaseKey::TrieLog(_) => return KeyType::TrieLog, } } diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs index 0c124c6bc3..e05f1c3efc 100644 --- a/crates/client/db/src/lib.rs +++ b/crates/client/db/src/lib.rs @@ -30,7 +30,6 @@ mod meta_db; use std::marker::PhantomData; use std::path::{Path, PathBuf}; -use std::sync::atomic::AtomicU32; use std::sync::Arc; use bonsai_db::{BonsaiDb, TrieColumn}; diff --git a/crates/client/deoxys/Cargo.toml b/crates/client/deoxys/Cargo.toml index 7c74843ffb..de23a0951c 100644 --- a/crates/client/deoxys/Cargo.toml +++ b/crates/client/deoxys/Cargo.toml @@ -42,6 +42,7 @@ serde = { workspace = true, default-features = true } tokio = { workspace = true, features = ["macros", "parking_lot", "test-util"] } url = { workspace = true } validator = { workspace = true, features = ["derive"] } +crossbeam-skiplist ={ workspace = true } madara-runtime = { workspace = true } parity-scale-codec = { workspace = true, features = ["derive"] } diff --git a/crates/client/deoxys/src/commitments/classes.rs b/crates/client/deoxys/src/commitments/classes.rs index 21554ea355..b4435f3898 100644 --- a/crates/client/deoxys/src/commitments/classes.rs +++ b/crates/client/deoxys/src/commitments/classes.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bitvec::vec::BitVec; use bonsai_trie::id::{BasicId, BasicIdBuilder}; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; -use mc_db::bonsai_db::{BonsaiDb, TrieColumn}; +use mc_db::bonsai_db::BonsaiDb; use mc_db::BonsaiDbError; use mp_felt::Felt252Wrapper; use mp_hashers::poseidon::PoseidonHasher; diff --git a/crates/client/deoxys/src/commitments/contracts.rs b/crates/client/deoxys/src/commitments/contracts.rs index c9ba6570d7..d078273525 100644 --- 
a/crates/client/deoxys/src/commitments/contracts.rs +++ b/crates/client/deoxys/src/commitments/contracts.rs @@ -1,12 +1,10 @@ use std::sync::Arc; use bitvec::prelude::BitVec; -use blockifier::execution::contract_address; use blockifier::state::cached_state::CommitmentStateDiff; use bonsai_trie::id::{BasicId, BasicIdBuilder}; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; -use ethers::addressbook::Contract; -use mc_db::bonsai_db::{BonsaiDb, TrieColumn}; +use mc_db::bonsai_db::BonsaiDb; use mc_db::BonsaiDbError; use mp_felt::Felt252Wrapper; use mp_hashers::pedersen::PedersenHasher; @@ -34,7 +32,7 @@ pub struct ContractLeafParams { /// The storage root hash. pub fn update_storage_trie( contract_address: &ContractAddress, - commitment_state_diff: CommitmentStateDiff, + csd: &Arc, bonsai_db: &Arc>, ) -> Result { let config = BonsaiStorageConfig::default(); @@ -42,7 +40,7 @@ pub fn update_storage_trie( let mut bonsai_storage: BonsaiStorage, Pedersen> = BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); - if let Some(updates) = commitment_state_diff.storage_updates.get(contract_address) { + if let Some(updates) = csd.storage_updates.get(contract_address) { for (storage_key, storage_value) in updates { let key = BitVec::from_vec(Felt252Wrapper::from(storage_key.0.0).0.to_bytes_be()[..31].to_vec()); let value = Felt252Wrapper::from(*storage_value); @@ -118,7 +116,7 @@ pub fn update_contract_trie( contract_hash: Felt252Wrapper, contract_leaf_params: ContractLeafParams, bonsai_db: &Arc>, -) -> Result { +) -> anyhow::Result { let config = BonsaiStorageConfig::default(); let bonsai_db = bonsai_db.as_ref(); let mut bonsai_storage = diff --git a/crates/client/deoxys/src/commitments/events.rs b/crates/client/deoxys/src/commitments/events.rs index add5164d0d..dc3ba11e27 100644 --- a/crates/client/deoxys/src/commitments/events.rs +++ b/crates/client/deoxys/src/commitments/events.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use 
anyhow::Ok; use bitvec::vec::BitVec; use bonsai_trie::databases::HashMapDb; use bonsai_trie::id::{BasicId, BasicIdBuilder}; @@ -14,6 +13,7 @@ use starknet_api::transaction::Event; use starknet_ff::FieldElement; use starknet_types_core::felt::Felt; use starknet_types_core::hash::Pedersen; +use tokio::task::{spawn_blocking, JoinSet}; /// Calculate the hash of the event. /// @@ -57,38 +57,37 @@ pub fn calculate_event_hash(event: &Event) -> FieldElement { /// # Returns /// /// The event commitment as `Felt252Wrapper`. -pub fn event_commitment( - events: &[Event], - bonsai_db: &Arc>, -) -> Result { - if events.len() > 0 { - let config = BonsaiStorageConfig::default(); - let bonsai_db = bonsai_db.as_ref(); - let mut bonsai_storage = - BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); - - let mut id_builder = BasicIdBuilder::new(); - - let zero = id_builder.new_id(); - bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage"); - - for (i, event) in events.iter().enumerate() { - let event_hash = calculate_event_hash::(event); - let key = BitVec::from_vec(i.to_be_bytes().to_vec()); - let value = Felt::from(Felt252Wrapper::from(event_hash)); - bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); - } - - let id = id_builder.new_id(); - bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); +#[deprecated = "use `memory_event_commitment` instead"] +pub fn event_commitment(events: &[Event], bonsai_db: &Arc>) -> Result { + if events.is_empty() { + return Ok(Felt252Wrapper::ZERO); + } + + let config = BonsaiStorageConfig::default(); + let bonsai_db = bonsai_db.as_ref(); + let mut bonsai_storage = + BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); + + let mut id_builder = BasicIdBuilder::new(); - let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); - 
bonsai_storage.revert_to(zero).unwrap(); + let zero = id_builder.new_id(); + bonsai_storage.commit(zero).expect("Failed to commit to bonsai storage"); - for (i, event) in events.iter().enumerate() { - let event_hash = calculate_event_hash::(event); - let key = BitVec::from_vec(i.to_be_bytes().to_vec()); - let value = Felt::from(Felt252Wrapper::from(event_hash)); - bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); + for (i, event) in events.iter().enumerate() { + let event_hash = calculate_event_hash::(event); + let key = BitVec::from_vec(i.to_be_bytes().to_vec()); + let value = Felt::from(Felt252Wrapper::from(event_hash)); + bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); } + + let id = id_builder.new_id(); + bonsai_storage.commit(id).map_err(|_| format!("Failed to commit to bonsai storage"))?; + + // restores the Bonsai Trie to its previous state + let root_hash = bonsai_storage.root_hash().map_err(|_| format!("Failed to get root hash"))?; + bonsai_storage.revert_to(zero).unwrap(); + + Ok(Felt252Wrapper::from(root_hash)) } /// Calculate the event commitment in memory using HashMapDb (which is more efficient for this @@ -101,27 +100,45 @@ pub fn event_commitment( /// # Returns /// /// The event commitment as `Felt252Wrapper`. 
-pub fn memory_event_commitment(events: &[Event]) -> Result { - if !events.is_empty() { - let config = BonsaiStorageConfig::default(); - let bonsai_db = HashMapDb::::default(); - let mut bonsai_storage = - BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); - - for (i, event) in events.iter().enumerate() { - let event_hash = calculate_event_hash::(event); - let key = BitVec::from_vec(i.to_be_bytes().to_vec()); - let value = Felt::from(Felt252Wrapper::from(event_hash)); - bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); - } - - let mut id_builder = BasicIdBuilder::new(); - let id = id_builder.new_id(); - bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); +pub async fn memory_event_commitment(events: &[Event]) -> Result { + if events.is_empty() { + return Ok(Felt252Wrapper::ZERO); + } + + let config = BonsaiStorageConfig::default(); + let bonsai_db = HashMapDb::::default(); + let mut bonsai_storage = + BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); - let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); - Ok(Felt252Wrapper::from(root_hash)) - } else { - Ok(Felt252Wrapper::ZERO) + // event hashes are computed in parallel + let mut task_set = JoinSet::new(); + events.iter().cloned().enumerate().for_each(|(i, event)| { + task_set.spawn(async move { (i, calculate_event_hash::(&event)) }); + }); + + // once event hashes have finished computing, they are inserted into the local Bonsai db + while let Some(res) = task_set.join_next().await { + let (i, event_hash) = res.map_err(|e| format!("Failed to retrieve event hash: {e}"))?; + let key = BitVec::from_vec(i.to_be_bytes().to_vec()); + let value = Felt::from(Felt252Wrapper::from(event_hash)); + bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); } + + // Note that committing changes still has 
the greatest performance hit + // as this is where the root hash is calculated. Due to the Merkle structure + // of Bonsai Tries, this results in a trie size that grows very rapidly with + // each new insertion. It seems that the only vector of optimization here + // would be to optimize the tree traversal and hash computation. + let mut id_builder = BasicIdBuilder::new(); + let id = id_builder.new_id(); + + // run in a blocking-safe thread to avoid starving the thread pool + let root_hash = spawn_blocking(move || { + bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + bonsai_storage.root_hash().expect("Failed to get root hash") + }) + .await + .map_err(|e| format!("Failed to computed event root hash: {e}"))?; + + Ok(Felt252Wrapper::from(root_hash)) } diff --git a/crates/client/deoxys/src/commitments/lib.rs b/crates/client/deoxys/src/commitments/lib.rs index 3ce2819f7d..afc25916c0 100644 --- a/crates/client/deoxys/src/commitments/lib.rs +++ b/crates/client/deoxys/src/commitments/lib.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use blockifier::state::cached_state::CommitmentStateDiff; use indexmap::IndexMap; use mc_db::bonsai_db::BonsaiDb; -use mc_db::{BonsaiDbError, BonsaiDbs}; +use mc_db::BonsaiDbs; use mp_block::state_update::StateUpdateWrapper; use mp_felt::Felt252Wrapper; use mp_hashers::poseidon::PoseidonHasher; @@ -14,6 +14,8 @@ use starknet_api::api_core::{ClassHash, CompiledClassHash, ContractAddress, Nonc use starknet_api::hash::StarkFelt; use starknet_api::state::StorageKey; use starknet_api::transaction::Event; +use tokio::join; +use tokio::task::{spawn_blocking, JoinSet}; use super::classes::{get_class_trie_root, update_class_trie}; use super::contracts::{get_contract_trie_root, update_contract_trie, update_storage_trie, ContractLeafParams}; @@ -32,16 +34,18 @@ use super::transactions::memory_transaction_commitment; /// # Returns /// /// The transaction and the event commitment as `Felt252Wrapper`. 
-pub fn calculate_commitments( +pub async fn calculate_commitments( transactions: &[Transaction], events: &[Event], chain_id: Felt252Wrapper, block_number: u64, ) -> (Felt252Wrapper, Felt252Wrapper) { + let (commitment_tx, commitment_event) = + join!(memory_transaction_commitment(transactions, chain_id, block_number), memory_event_commitment(events)); + ( - memory_transaction_commitment(transactions, chain_id, block_number) - .expect("Failed to calculate transaction commitment"), - memory_event_commitment(events).expect("Failed to calculate event commitment"), + commitment_tx.expect("Failed to calculate transaction commitment"), + commitment_event.expect("Failed to calculate event commitment"), ) } @@ -120,8 +124,6 @@ pub fn calculate_state_root( where H: HasherT, { - println!("classes_trie_root: {:?}", classes_trie_root); - println!("contracts_trie_root: {:?}", contracts_trie_root); let starknet_state_prefix = Felt252Wrapper::try_from("STARKNET_STATE_V0".as_bytes()).unwrap(); let state_commitment_hash = @@ -144,33 +146,94 @@ where /// # Returns /// /// The updated state root as a `Felt252Wrapper`. 
-pub fn update_state_root( +pub async fn update_state_root( csd: CommitmentStateDiff, bonsai_dbs: BonsaiDbs, -) -> Result { - let mut contract_trie_root = Felt252Wrapper::default(); - let mut class_trie_root = Felt252Wrapper::default(); +) -> anyhow::Result { + let arc_csd = Arc::new(csd); + let arc_bonsai_dbs = Arc::new(bonsai_dbs); + + let contract_trie_root = contract_trie_root(Arc::clone(&arc_csd), Arc::clone(&arc_bonsai_dbs)).await?; + + let class_trie_root = class_trie_root(Arc::clone(&arc_csd), Arc::clone(&arc_bonsai_dbs))?; + + let state_root = calculate_state_root::(contract_trie_root, class_trie_root); - for (contract_address, class_hash) in csd.address_to_class_hash.iter() { - let storage_root = update_storage_trie(contract_address, csd.clone(), &bonsai_dbs.storage) - .expect("Failed to update storage trie"); - let nonce = csd.address_to_nonce.get(contract_address).unwrap_or(&Felt252Wrapper::default().into()).clone(); + Ok(state_root) +} + +async fn contract_trie_root( + csd: Arc, + bonsai_dbs: Arc>, +) -> anyhow::Result { + // Risk of starving the thread pool (execution over 1s in some cases), must be run in a + // blocking-safe thread. Main bottleneck is still calling `commit` on the Bonsai db. 
+ let mut task_set = spawn_blocking(move || { + let mut task_set = JoinSet::new(); - let contract_leaf_params = - ContractLeafParams { class_hash: class_hash.clone().into(), storage_root, nonce: nonce.into() }; + csd.address_to_class_hash.iter().for_each(|(contract_address, class_hash)| { + let csd_clone = Arc::clone(&csd); + let bonsai_dbs_clone = Arc::clone(&bonsai_dbs); - contract_trie_root = - update_contract_trie(contract_address.clone().into(), contract_leaf_params, &bonsai_dbs.contract)?; + task_set.spawn(contract_trie_root_loop( + csd_clone, + bonsai_dbs_clone, + contract_address.clone(), + class_hash.clone(), + )); + }); + + task_set + }) + .await?; + + // The order in which contract trie roots are waited for is not important since each call to + // `update_contract_trie` in `contract_trie_root` mutates the Deoxys db. + let mut contract_trie_root = Felt252Wrapper::ZERO; + while let Some(res) = task_set.join_next().await { + contract_trie_root = match res? { + Ok(trie_root) => trie_root, + Err(e) => { + task_set.abort_all(); + return Err(e); + } + } } + Ok(contract_trie_root) +} + +async fn contract_trie_root_loop( + csd: Arc, + bonsai_dbs: Arc>, + contract_address: ContractAddress, + class_hash: ClassHash, +) -> anyhow::Result { + let storage_root = + update_storage_trie(&contract_address, &csd, &bonsai_dbs.storage).expect("Failed to update storage trie"); + let nonce = csd.address_to_nonce.get(&contract_address).unwrap_or(&Felt252Wrapper::default().into()).clone(); + + let contract_leaf_params = + ContractLeafParams { class_hash: class_hash.clone().into(), storage_root, nonce: nonce.into() }; + + update_contract_trie(contract_address.into(), contract_leaf_params, &bonsai_dbs.contract) +} + +fn class_trie_root( + csd: Arc, + bonsai_dbs: Arc>, +) -> anyhow::Result { + let mut class_trie_root = Felt252Wrapper::default(); + + // Based on benchmarks the execution cost of computing the class tried root is negligible + // compared to the contract trie root. 
It is likely that parallelizing this would yield no + observable benefits. for (class_hash, compiled_class_hash) in csd.class_hash_to_compiled_class_hash.iter() { class_trie_root = update_class_trie(class_hash.clone().into(), compiled_class_hash.clone().into(), &bonsai_dbs.class)?; } - let state_root = calculate_state_root::(contract_trie_root, class_trie_root); - - Ok(state_root) + Ok(class_trie_root) } /// Retrieves and compute the actual state root. diff --git a/crates/client/deoxys/src/commitments/transactions.rs b/crates/client/deoxys/src/commitments/transactions.rs index cd5ab51265..2eb2a0fc91 100644 --- a/crates/client/deoxys/src/commitments/transactions.rs +++ b/crates/client/deoxys/src/commitments/transactions.rs @@ -15,6 +15,7 @@ use sp_runtime::traits::Block as BlockT; use starknet_ff::FieldElement; use starknet_types_core::felt::Felt; use starknet_types_core::hash::Pedersen; +use tokio::task::{spawn_blocking, JoinSet}; /// Compute the combined hash of the transaction hash and the signature. /// @@ -71,6 +72,7 @@ where /// # Returns /// /// The transaction commitment as `Felt252Wrapper`. +#[deprecated = "use `memory_transaction_commitment` instead"] pub fn transaction_commitment( transactions: &[Transaction], chain_id: Felt252Wrapper, @@ -78,9 +80,8 @@ pub fn transaction_commitment( bonsai_db: &Arc>, ) -> Result { let config = BonsaiStorageConfig::default(); - let bonsai_db = bonsai_db.as_ref(); let mut bonsai_storage = - BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); + BonsaiStorage::<_, _, Pedersen>::new(bonsai_db.as_ref(), config).expect("Failed to create bonsai storage"); let mut id_builder = BasicIdBuilder::new(); @@ -115,27 +116,47 @@ pub fn transaction_commitment( /// # Returns /// /// The transaction commitment as `Felt252Wrapper`. 
-pub fn memory_transaction_commitment( +pub async fn memory_transaction_commitment( transactions: &[Transaction], chain_id: Felt252Wrapper, block_number: u64, -) -> Result { +) -> Result { let config = BonsaiStorageConfig::default(); let bonsai_db = HashMapDb::::default(); let mut bonsai_storage = BonsaiStorage::<_, _, Pedersen>::new(bonsai_db, config).expect("Failed to create bonsai storage"); - for (i, tx) in transactions.iter().enumerate() { - let tx_hash = calculate_transaction_hash_with_signature::(tx, chain_id, block_number); + // transaction hashes are computed in parallel + let mut task_set = JoinSet::new(); + transactions.iter().cloned().enumerate().for_each(|(i, tx)| { + task_set.spawn(async move { + (i, calculate_transaction_hash_with_signature::(&tx, chain_id, block_number)) + }); + }); + + // once transaction hashes have finished computing, they are inserted into the local Bonsai db + while let Some(res) = task_set.join_next().await { + let (i, tx_hash) = res.map_err(|e| format!("Failed to retrieve transaction hash: {e}"))?; let key = BitVec::from_vec(i.to_be_bytes().to_vec()); let value = Felt::from(Felt252Wrapper::from(tx_hash)); bonsai_storage.insert(key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); } + // Note that committing changes still has the greatest performance hit + // as this is where the root hash is calculated. Due to the Merkle structure + // of Bonsai Tries, this results in a trie size that grows very rapidly with + // each new insertion. It seems that the only vector of optimization here + // would be to optimize the tree traversal and hash computation. 
let mut id_builder = BasicIdBuilder::new(); let id = id_builder.new_id(); - bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); - let root_hash = bonsai_storage.root_hash().expect("Failed to get root hash"); + // run in a blocking-safe thread to avoid starving the thread pool + let root_hash = spawn_blocking(move || { + bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + bonsai_storage.root_hash().expect("Failed to get root hash") + }) + .await + .map_err(|e| format!("Failed to computed transaction root hash: {e}"))?; + Ok(Felt252Wrapper::from(root_hash)) } diff --git a/crates/client/deoxys/src/l2.rs b/crates/client/deoxys/src/l2.rs index f47b74b0e8..a5a01f8a5e 100644 --- a/crates/client/deoxys/src/l2.rs +++ b/crates/client/deoxys/src/l2.rs @@ -4,7 +4,6 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use itertools::Itertools; -use mc_db::bonsai_db::BonsaiDb; use mc_db::BonsaiDbs; use mc_storage::OverrideHandle; use mp_block::state_update::StateUpdateWrapper; @@ -231,7 +230,8 @@ async fn fetch_block( let block = client.get_block(BlockId::Number(block_number)).await.map_err(|e| format!("failed to get block: {e}"))?; - block_sender.send(crate::convert::block(&block)).await.map_err(|e| format!("failed to dispatch block: {e}"))?; + let block_conv = crate::convert::block(block).await; + block_sender.send(block_conv).await.map_err(|e| format!("failed to dispatch block: {e}"))?; Ok(()) } @@ -240,7 +240,7 @@ pub async fn fetch_genesis_block(config: FetchConfig) -> Result( @@ -282,7 +282,7 @@ async fn fetch_state_update( .await .map_err(|e| format!("failed to get state update: {e}"))?; - let _ = verify_l2(block_number, &state_update, bonsai_dbs); + verify_l2(block_number, &state_update, bonsai_dbs).await?; Ok(state_update) } @@ -294,7 +294,7 @@ async fn fetch_genesis_state_update( let state_update = provider.get_state_update(BlockId::Number(0)).await.map_err(|e| format!("failed to get state update: {e}"))?; - let _ = 
verify_l2(0, &state_update, bonsai_dbs); + verify_l2(0, &state_update, bonsai_dbs).await?; Ok(state_update) } @@ -447,14 +447,19 @@ pub fn update_l2(state_update: L2StateUpdate) { } /// Verify and update the L2 state according to the latest state update -pub fn verify_l2( +pub async fn verify_l2( block_number: u64, state_update: &StateUpdate, bonsai_dbs: BonsaiDbs, ) -> Result<(), String> { let state_update_wrapper = StateUpdateWrapper::from(state_update); + let csd = build_commitment_state_diff(state_update_wrapper.clone()); - let state_root = update_state_root(csd, bonsai_dbs).expect("Failed to update state root"); + + // Main l2 sync bottleneck HERE! + let state_root = + update_state_root(csd, bonsai_dbs).await.map_err(|e| format!("Failed to update state root: {e}"))?; + let block_hash = state_update.block_hash.expect("Block hash not found in state update"); update_l2(L2StateUpdate { @@ -462,7 +467,6 @@ pub fn verify_l2( global_root: state_root.into(), block_hash: Felt252Wrapper::from(block_hash).into(), }); - println!("➡️ block_number {:?}, block_hash {:?}, state_root {:?}", block_number, block_hash, state_root); Ok(()) } diff --git a/crates/client/deoxys/src/utils/convert.rs b/crates/client/deoxys/src/utils/convert.rs index eea4337ccb..f0c920937f 100644 --- a/crates/client/deoxys/src/utils/convert.rs +++ b/crates/client/deoxys/src/utils/convert.rs @@ -8,28 +8,38 @@ use starknet_providers::sequencer::models as p; use crate::commitments::lib::calculate_commitments; -pub fn block(block: &p::Block) -> mp_block::Block { - let transactions = transactions(&block.transactions); +pub async fn block(block: p::Block) -> mp_block::Block { + // converts starknet_provider transactions and events to mp_transactions and starknet_api events + let transactions = transactions(block.transactions); let events = events(&block.transaction_receipts); + + let parent_block_hash = felt(block.parent_block_hash); let block_number = block.block_number.expect("no block number provided"); + 
let block_timestamp = block.timestamp; + let global_state_root = felt(block.state_root.expect("no state root provided")); let sequencer_address = block.sequencer_address.map_or(contract_address(FieldElement::ZERO), contract_address); - let (transaction_commitment, event_commitment) = commitments(&transactions, &events, block_number); - let l1_gas_price = resource_price(block.eth_l1_gas_price); + let transaction_count = transactions.len() as u128; + let event_count = events.len() as u128; + + let (transaction_commitment, event_commitment) = commitments(&transactions, &events, block_number).await; + let protocol_version = starknet_version(&block.starknet_version); + let l1_gas_price = resource_price(block.eth_l1_gas_price); + let extra_data = block.block_hash.map(|h| sp_core::U256::from_big_endian(&h.to_bytes_be())); let header = mp_block::Header { - parent_block_hash: felt(block.parent_block_hash), + parent_block_hash, block_number, - block_timestamp: block.timestamp, - global_state_root: felt(block.state_root.expect("no state root provided")), + block_timestamp, + global_state_root, sequencer_address, - transaction_count: block.transactions.len() as u128, + transaction_count, transaction_commitment, - event_count: events.len() as u128, + event_count, event_commitment, protocol_version, l1_gas_price, - extra_data: block.block_hash.map(|h| sp_core::U256::from_big_endian(&h.to_bytes_be())), + extra_data, }; let ordered_events: Vec = block @@ -43,11 +53,11 @@ pub fn block(block: &p::Block) -> mp_block::Block { mp_block::Block::new(header, transactions, ordered_events) } -fn transactions(txs: &[p::TransactionType]) -> Vec { - txs.iter().map(transaction).collect() +fn transactions(txs: Vec) -> Vec { + txs.into_iter().map(transaction).collect() } -fn transaction(transaction: &p::TransactionType) -> mp_transactions::Transaction { +fn transaction(transaction: p::TransactionType) -> mp_transactions::Transaction { match transaction { p::TransactionType::InvokeFunction(tx) => 
mp_transactions::Transaction::Invoke(invoke_transaction(tx)), p::TransactionType::Declare(tx) => mp_transactions::Transaction::Declare(declare_transaction(tx)), @@ -59,32 +69,32 @@ fn transaction(transaction: &p::TransactionType) -> mp_transactions::Transaction } } -fn invoke_transaction(tx: &p::InvokeFunctionTransaction) -> mp_transactions::InvokeTransaction { +fn invoke_transaction(tx: p::InvokeFunctionTransaction) -> mp_transactions::InvokeTransaction { if tx.version == FieldElement::ZERO { mp_transactions::InvokeTransaction::V0(mp_transactions::InvokeTransactionV0 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), contract_address: felt(tx.sender_address).into(), entry_point_selector: felt(tx.entry_point_selector.expect("no entry_point_selector provided")).into(), - calldata: tx.calldata.iter().copied().map(felt).map(Into::into).collect(), + calldata: tx.calldata.into_iter().map(felt).map(Into::into).collect(), }) } else { mp_transactions::InvokeTransaction::V1(mp_transactions::InvokeTransactionV1 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce.expect("no nonce provided")).into(), sender_address: felt(tx.sender_address).into(), - calldata: tx.calldata.iter().copied().map(felt).map(Into::into).collect(), + calldata: tx.calldata.into_iter().map(felt).map(Into::into).collect(), offset_version: false, }) } } -fn declare_transaction(tx: &p::DeclareTransaction) -> mp_transactions::DeclareTransaction { +fn declare_transaction(tx: p::DeclareTransaction) -> mp_transactions::DeclareTransaction { if tx.version == FieldElement::ZERO { mp_transactions::DeclareTransaction::V0(mp_transactions::DeclareTransactionV0 { max_fee: 
fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce).into(), class_hash: felt(tx.class_hash).into(), sender_address: felt(tx.sender_address).into(), @@ -92,7 +102,7 @@ fn declare_transaction(tx: &p::DeclareTransaction) -> mp_transactions::DeclareTr } else if tx.version == FieldElement::ONE { mp_transactions::DeclareTransaction::V1(mp_transactions::DeclareTransactionV1 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce).into(), class_hash: felt(tx.class_hash).into(), sender_address: felt(tx.sender_address).into(), @@ -101,7 +111,7 @@ fn declare_transaction(tx: &p::DeclareTransaction) -> mp_transactions::DeclareTr } else { mp_transactions::DeclareTransaction::V2(mp_transactions::DeclareTransactionV2 { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce).into(), class_hash: felt(tx.class_hash).into(), sender_address: felt(tx.sender_address).into(), @@ -111,29 +121,29 @@ fn declare_transaction(tx: &p::DeclareTransaction) -> mp_transactions::DeclareTr } } -fn deploy_transaction(tx: &p::DeployTransaction) -> mp_transactions::DeployTransaction { +fn deploy_transaction(tx: p::DeployTransaction) -> mp_transactions::DeployTransaction { mp_transactions::DeployTransaction { version: starknet_api::transaction::TransactionVersion(felt(tx.version)), class_hash: felt(tx.class_hash).into(), contract_address: felt(tx.contract_address).into(), contract_address_salt: felt(tx.contract_address_salt).into(), - constructor_calldata: 
tx.constructor_calldata.iter().copied().map(felt).map(Into::into).collect(), + constructor_calldata: tx.constructor_calldata.into_iter().map(felt).map(Into::into).collect(), } } -fn deploy_account_transaction(tx: &p::DeployAccountTransaction) -> mp_transactions::DeployAccountTransaction { +fn deploy_account_transaction(tx: p::DeployAccountTransaction) -> mp_transactions::DeployAccountTransaction { mp_transactions::DeployAccountTransaction { max_fee: fee(tx.max_fee.expect("no max fee provided")), - signature: tx.signature.iter().copied().map(felt).map(Into::into).collect(), + signature: tx.signature.into_iter().map(felt).map(Into::into).collect(), nonce: felt(tx.nonce).into(), contract_address_salt: felt(tx.contract_address_salt).into(), - constructor_calldata: tx.constructor_calldata.iter().copied().map(felt).map(Into::into).collect(), + constructor_calldata: tx.constructor_calldata.into_iter().map(felt).map(Into::into).collect(), class_hash: felt(tx.class_hash).into(), offset_version: false, } } -fn l1_handler_transaction(tx: &p::L1HandlerTransaction) -> mp_transactions::HandleL1MessageTransaction { +fn l1_handler_transaction(tx: p::L1HandlerTransaction) -> mp_transactions::HandleL1MessageTransaction { mp_transactions::HandleL1MessageTransaction { nonce: tx .nonce @@ -145,7 +155,7 @@ fn l1_handler_transaction(tx: &p::L1HandlerTransaction) -> mp_transactions::Hand }), contract_address: felt(tx.contract_address).into(), entry_point_selector: felt(tx.entry_point_selector).into(), - calldata: tx.calldata.iter().copied().map(felt).map(Into::into).collect(), + calldata: tx.calldata.into_iter().map(felt).map(Into::into).collect(), } } @@ -187,16 +197,16 @@ fn event(event: &p::Event) -> starknet_api::transaction::Event { } } -fn commitments( +async fn commitments( transactions: &[mp_transactions::Transaction], events: &[starknet_api::transaction::Event], block_number: u64, ) -> (StarkFelt, StarkFelt) { let chain_id = chain_id(); - let (a, b) = 
calculate_commitments(transactions, events, chain_id, block_number); + let (commitment_tx, commitment_event) = calculate_commitments(transactions, events, chain_id, block_number).await; - (a.into(), b.into()) + (commitment_tx.into(), commitment_event.into()) } fn chain_id() -> mp_felt::Felt252Wrapper { diff --git a/crates/primitives/transactions/src/lib.rs b/crates/primitives/transactions/src/lib.rs index 832eb133e4..523b367543 100644 --- a/crates/primitives/transactions/src/lib.rs +++ b/crates/primitives/transactions/src/lib.rs @@ -117,7 +117,7 @@ pub enum UserTransaction { Invoke(InvokeTransaction), } -#[derive(Clone, Debug, Eq, PartialEq, From)] +#[derive(Clone, Debug, Eq, PartialEq, From, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub enum Transaction { @@ -136,7 +136,7 @@ pub enum UserOrL1HandlerTransaction { L1Handler(HandleL1MessageTransaction, Fee), } -#[derive(Debug, Clone, Eq, PartialEq, From)] +#[derive(Debug, Clone, Eq, PartialEq, From, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub enum InvokeTransaction { @@ -144,7 +144,7 @@ pub enum InvokeTransaction { V1(InvokeTransactionV1), } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct InvokeTransactionV0 { @@ -155,7 +155,7 @@ pub struct InvokeTransactionV0 { pub calldata: Vec, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] 
#[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct InvokeTransactionV1 { @@ -167,7 +167,7 @@ pub struct InvokeTransactionV1 { pub offset_version: bool, } -#[derive(Debug, Clone, Eq, PartialEq, From)] +#[derive(Debug, Clone, Eq, PartialEq, From, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub enum DeclareTransaction { @@ -176,7 +176,7 @@ pub enum DeclareTransaction { V2(DeclareTransactionV2), } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeclareTransactionV0 { @@ -187,7 +187,7 @@ pub struct DeclareTransactionV0 { pub sender_address: Felt252Wrapper, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeclareTransactionV1 { @@ -199,7 +199,7 @@ pub struct DeclareTransactionV1 { pub offset_version: bool, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeclareTransactionV2 { @@ -212,7 +212,7 @@ pub struct DeclareTransactionV2 { pub offset_version: bool, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", 
derive(scale_info::TypeInfo))] pub struct DeployAccountTransaction { @@ -225,7 +225,7 @@ pub struct DeployAccountTransaction { pub offset_version: bool, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct DeployTransaction { @@ -236,7 +236,7 @@ pub struct DeployTransaction { pub constructor_calldata: Vec, } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] #[cfg_attr(feature = "parity-scale-codec", derive(parity_scale_codec::Encode, parity_scale_codec::Decode))] #[cfg_attr(feature = "scale-info", derive(scale_info::TypeInfo))] pub struct HandleL1MessageTransaction {